Use generic SIMD intrinsics for AVX512BW cvtepi

This commit is contained in:
sayantn
2025-11-28 03:31:23 +05:30
parent 4c49a7a22f
commit 98a95454b0
@@ -10735,11 +10735,11 @@ pub fn _mm_maskz_cvtepi16_epi8(k: __mmask8, a: __m128i) -> __m128i {
#[cfg_attr(test, assert_instr(vpmovswb))]
pub fn _mm512_cvtsepi16_epi8(a: __m512i) -> __m256i {
unsafe {
transmute(vpmovswb(
a.as_i16x32(),
i8x32::ZERO,
0b11111111_11111111_11111111_11111111,
simd_cast::<_, i8x32>(simd_imax(
simd_imin(a.as_i16x32(), i16x32::splat(i8::MAX as _)),
i16x32::splat(i8::MIN as _),
))
.as_m256i()
}
}
@@ -10751,7 +10751,9 @@ pub fn _mm512_cvtsepi16_epi8(a: __m512i) -> __m256i {
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovswb))]
pub fn _mm512_mask_cvtsepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m256i {
    unsafe {
        // Merge-masking: converted lane where the mask bit is set, `src` lane otherwise.
        simd_select_bitmask(k, _mm512_cvtsepi16_epi8(a).as_i8x32(), src.as_i8x32()).as_m256i()
    }
}
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -10762,7 +10764,7 @@ pub fn _mm512_mask_cvtsepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovswb))]
pub fn _mm512_maskz_cvtsepi16_epi8(k: __mmask32, a: __m512i) -> __m256i {
    // Zero-masking: lanes whose mask bit is clear become 0.
    unsafe { simd_select_bitmask(k, _mm512_cvtsepi16_epi8(a).as_i8x32(), i8x32::ZERO).as_m256i() }
}
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst.
@@ -10773,7 +10775,13 @@ pub fn _mm512_maskz_cvtsepi16_epi8(k: __mmask32, a: __m512i) -> __m256i {
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovswb))]
pub fn _mm256_cvtsepi16_epi8(a: __m256i) -> __m128i {
    unsafe {
        // Signed saturation: clamp each i16 lane into the i8 range before the
        // narrowing cast, mirroring the 512-bit variant.
        simd_cast::<_, i8x16>(simd_imax(
            simd_imin(a.as_i16x16(), i16x16::splat(i8::MAX as _)),
            i16x16::splat(i8::MIN as _),
        ))
        .as_m128i()
    }
}
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set).
@@ -10784,7 +10792,9 @@ pub fn _mm256_cvtsepi16_epi8(a: __m256i) -> __m128i {
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovswb))]
pub fn _mm256_mask_cvtsepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m128i {
    unsafe {
        // Merge-masking: keep `src` lanes where the corresponding mask bit is clear.
        simd_select_bitmask(k, _mm256_cvtsepi16_epi8(a).as_i8x16(), src.as_i8x16()).as_m128i()
    }
}
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -10795,7 +10805,7 @@ pub fn _mm256_mask_cvtsepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovswb))]
pub fn _mm256_maskz_cvtsepi16_epi8(k: __mmask16, a: __m256i) -> __m128i {
    // Zero-masking variant of the saturating narrow.
    unsafe { simd_select_bitmask(k, _mm256_cvtsepi16_epi8(a).as_i8x16(), i8x16::ZERO).as_m128i() }
}
/// Convert packed signed 16-bit integers in a to packed 8-bit integers with signed saturation, and store the results in dst.
@@ -10840,11 +10850,7 @@ pub fn _mm_maskz_cvtsepi16_epi8(k: __mmask8, a: __m128i) -> __m128i {
#[cfg_attr(test, assert_instr(vpmovuswb))]
pub fn _mm512_cvtusepi16_epi8(a: __m512i) -> __m256i {
    unsafe {
        // Unsigned saturation: lanes are non-negative, so only an upper clamp
        // to u8::MAX is needed before the narrowing cast.
        simd_cast::<_, u8x32>(simd_imin(a.as_u16x32(), u16x32::splat(u8::MAX as _))).as_m256i()
    }
}
@@ -10856,7 +10862,9 @@ pub fn _mm512_cvtusepi16_epi8(a: __m512i) -> __m256i {
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
pub fn _mm512_mask_cvtusepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __m256i {
    unsafe {
        // Merge-masking: converted lane where the mask bit is set, `src` lane otherwise.
        simd_select_bitmask(k, _mm512_cvtusepi16_epi8(a).as_u8x32(), src.as_u8x32()).as_m256i()
    }
}
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -10867,7 +10875,7 @@ pub fn _mm512_mask_cvtusepi16_epi8(src: __m256i, k: __mmask32, a: __m512i) -> __
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
pub fn _mm512_maskz_cvtusepi16_epi8(k: __mmask32, a: __m512i) -> __m256i {
    // Zero-masking: lanes whose mask bit is clear become 0.
    unsafe { simd_select_bitmask(k, _mm512_cvtusepi16_epi8(a).as_u8x32(), u8x32::ZERO).as_m256i() }
}
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst.
@@ -10879,11 +10887,7 @@ pub fn _mm512_maskz_cvtusepi16_epi8(k: __mmask32, a: __m512i) -> __m256i {
#[cfg_attr(test, assert_instr(vpmovuswb))]
pub fn _mm256_cvtusepi16_epi8(a: __m256i) -> __m128i {
    unsafe {
        // Unsigned saturation: clamp to u8::MAX, then narrow, mirroring the
        // 512-bit variant.
        simd_cast::<_, u8x16>(simd_imin(a.as_u16x16(), u16x16::splat(u8::MAX as _))).as_m128i()
    }
}
@@ -10895,7 +10899,9 @@ pub fn _mm256_cvtusepi16_epi8(a: __m256i) -> __m128i {
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
pub fn _mm256_mask_cvtusepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __m128i {
    unsafe {
        // Merge-masking: keep `src` lanes where the corresponding mask bit is clear.
        simd_select_bitmask(k, _mm256_cvtusepi16_epi8(a).as_u8x16(), src.as_u8x16()).as_m128i()
    }
}
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set).
@@ -10906,7 +10912,7 @@ pub fn _mm256_mask_cvtusepi16_epi8(src: __m128i, k: __mmask16, a: __m256i) -> __
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovuswb))]
pub fn _mm256_maskz_cvtusepi16_epi8(k: __mmask16, a: __m256i) -> __m128i {
    // Zero-masking variant of the unsigned saturating narrow.
    unsafe { simd_select_bitmask(k, _mm256_cvtusepi16_epi8(a).as_u8x16(), u8x16::ZERO).as_m128i() }
}
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the results in dst.
@@ -11592,7 +11598,9 @@ pub unsafe fn _mm_mask_cvtsepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovwb))]
pub unsafe fn _mm512_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32, a: __m512i) {
    // Truncate to i8 lanes, then write only the active lanes with an
    // unaligned masked store (inactive bytes in memory are untouched).
    let result = _mm512_cvtepi16_epi8(a).as_i8x32();
    let mask = simd_select_bitmask(k, i8x32::splat(!0), i8x32::ZERO);
    simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, result);
}
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -11603,7 +11611,9 @@ pub unsafe fn _mm512_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask32,
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovwb))]
pub unsafe fn _mm256_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16, a: __m256i) {
    // Truncate to i8 lanes, then write only the active lanes with an
    // unaligned masked store (inactive bytes in memory are untouched).
    let result = _mm256_cvtepi16_epi8(a).as_i8x16();
    let mask = simd_select_bitmask(k, i8x16::splat(!0), i8x16::ZERO);
    simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, result);
}
/// Convert packed 16-bit integers in a to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -11614,7 +11624,13 @@ pub unsafe fn _mm256_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask16,
#[stable(feature = "stdarch_x86_avx512", since = "1.89")]
#[cfg_attr(test, assert_instr(vpmovwb))]
pub unsafe fn _mm_mask_cvtepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a: __m128i) {
    // Only the low 8 bytes of the 128-bit truncation result are meaningful;
    // shuffle them out so the masked store touches at most 8 bytes of memory.
    let result: i8x8 = simd_shuffle!(
        _mm_cvtepi16_epi8(a).as_i8x16(),
        i8x16::ZERO,
        [0, 1, 2, 3, 4, 5, 6, 7]
    );
    let mask = simd_select_bitmask(k, i8x8::splat(!0), i8x8::ZERO);
    simd_masked_store!(SimdAlign::Unaligned, mask, mem_addr, result);
}
/// Convert packed unsigned 16-bit integers in a to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -11703,17 +11719,9 @@ pub unsafe fn _mm_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
#[link_name = "llvm.x86.avx512.dbpsadbw.128"]
fn vdbpsadbw128(a: u8x16, b: u8x16, imm8: i32) -> u16x8;
#[link_name = "llvm.x86.avx512.mask.pmovs.wb.512"]
fn vpmovswb(a: i16x32, src: i8x32, mask: u32) -> i8x32;
#[link_name = "llvm.x86.avx512.mask.pmovs.wb.256"]
fn vpmovswb256(a: i16x16, src: i8x16, mask: u16) -> i8x16;
#[link_name = "llvm.x86.avx512.mask.pmovs.wb.128"]
fn vpmovswb128(a: i16x8, src: i8x16, mask: u8) -> i8x16;
#[link_name = "llvm.x86.avx512.mask.pmovus.wb.512"]
fn vpmovuswb(a: u16x32, src: u8x32, mask: u32) -> u8x32;
#[link_name = "llvm.x86.avx512.mask.pmovus.wb.256"]
fn vpmovuswb256(a: u16x16, src: u8x16, mask: u16) -> u8x16;
#[link_name = "llvm.x86.avx512.mask.pmovus.wb.128"]
fn vpmovuswb128(a: u16x8, src: u8x16, mask: u8) -> u8x16;
@@ -11724,13 +11732,6 @@ pub unsafe fn _mm_mask_cvtusepi16_storeu_epi8(mem_addr: *mut i8, k: __mmask8, a:
#[link_name = "llvm.x86.avx512.mask.pmovs.wb.mem.128"]
fn vpmovswbmem128(mem_addr: *mut i8, a: i16x8, mask: u8);
#[link_name = "llvm.x86.avx512.mask.pmov.wb.mem.512"]
fn vpmovwbmem(mem_addr: *mut i8, a: i16x32, mask: u32);
#[link_name = "llvm.x86.avx512.mask.pmov.wb.mem.256"]
fn vpmovwbmem256(mem_addr: *mut i8, a: i16x16, mask: u16);
#[link_name = "llvm.x86.avx512.mask.pmov.wb.mem.128"]
fn vpmovwbmem128(mem_addr: *mut i8, a: i16x8, mask: u8);
#[link_name = "llvm.x86.avx512.mask.pmovus.wb.mem.512"]
fn vpmovuswbmem(mem_addr: *mut i8, a: i16x32, mask: u32);
#[link_name = "llvm.x86.avx512.mask.pmovus.wb.mem.256"]