update C language headers to LLVM 17

release/17.x branch, commit 8f4dd44097c9ae25dd203d5ac87f3b48f854bba8
Andrew Kelley
2023-08-11 16:20:14 -07:00
parent 3ed40b1140
commit 1861036f3b
49 changed files with 8869 additions and 405 deletions
__clang_cuda_intrinsics.h  +191
@@ -513,6 +513,197 @@ __device__ inline cuuint32_t __nvvm_get_smem_pointer(void *__ptr) {
return __nv_cvta_generic_to_shared_impl(__ptr);
}
} // extern "C"
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
__device__ inline unsigned __reduce_add_sync(unsigned __mask,
unsigned __value) {
return __nvvm_redux_sync_add(__mask, __value);
}
__device__ inline unsigned __reduce_min_sync(unsigned __mask,
unsigned __value) {
return __nvvm_redux_sync_umin(__mask, __value);
}
__device__ inline unsigned __reduce_max_sync(unsigned __mask,
unsigned __value) {
return __nvvm_redux_sync_umax(__mask, __value);
}
__device__ inline int __reduce_min_sync(unsigned __mask, int __value) {
return __nvvm_redux_sync_min(__mask, __value);
}
__device__ inline int __reduce_max_sync(unsigned __mask, int __value) {
return __nvvm_redux_sync_max(__mask, __value);
}
__device__ inline unsigned __reduce_or_sync(unsigned __mask, unsigned __value) {
return __nvvm_redux_sync_or(__mask, __value);
}
__device__ inline unsigned __reduce_and_sync(unsigned __mask,
unsigned __value) {
return __nvvm_redux_sync_and(__mask, __value);
}
__device__ inline unsigned __reduce_xor_sync(unsigned __mask,
unsigned __value) {
return __nvvm_redux_sync_xor(__mask, __value);
}
__device__ inline void __nv_memcpy_async_shared_global_4(void *__dst,
const void *__src,
unsigned __src_size) {
__nvvm_cp_async_ca_shared_global_4(
(void __attribute__((address_space(3))) *)__dst,
(const void __attribute__((address_space(1))) *)__src, __src_size);
}
__device__ inline void __nv_memcpy_async_shared_global_8(void *__dst,
const void *__src,
unsigned __src_size) {
__nvvm_cp_async_ca_shared_global_8(
(void __attribute__((address_space(3))) *)__dst,
(const void __attribute__((address_space(1))) *)__src, __src_size);
}
__device__ inline void __nv_memcpy_async_shared_global_16(void *__dst,
const void *__src,
unsigned __src_size) {
__nvvm_cp_async_ca_shared_global_16(
(void __attribute__((address_space(3))) *)__dst,
(const void __attribute__((address_space(1))) *)__src, __src_size);
}
__device__ inline void *
__nv_associate_access_property(const void *__ptr, unsigned long long __prop) {
  // TODO: it appears to provide the compiler with some sort of hint. We do
  // not know exactly what it is supposed to do. However, CUDA headers
  // suggest that just passing through __ptr should not affect correctness.
  // They do so on pre-sm80 GPUs where this builtin is not available.
return (void*)__ptr;
}
#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
__device__ inline unsigned __isCtaShared(const void *ptr) {
return __isShared(ptr);
}
__device__ inline unsigned __isClusterShared(const void *__ptr) {
return __nvvm_isspacep_shared_cluster(__ptr);
}
__device__ inline void *__cluster_map_shared_rank(const void *__ptr,
unsigned __rank) {
return __nvvm_mapa((void *)__ptr, __rank);
}
__device__ inline unsigned __cluster_query_shared_rank(const void *__ptr) {
return __nvvm_getctarank((void *)__ptr);
}
__device__ inline uint2
__cluster_map_shared_multicast(const void *__ptr,
unsigned int __cluster_cta_mask) {
return make_uint2((unsigned)__cvta_generic_to_shared(__ptr),
__cluster_cta_mask);
}
__device__ inline unsigned __clusterDimIsSpecified() {
return __nvvm_is_explicit_cluster();
}
__device__ inline dim3 __clusterDim() {
return dim3(__nvvm_read_ptx_sreg_cluster_nctaid_x(),
__nvvm_read_ptx_sreg_cluster_nctaid_y(),
__nvvm_read_ptx_sreg_cluster_nctaid_z());
}
__device__ inline dim3 __clusterRelativeBlockIdx() {
return dim3(__nvvm_read_ptx_sreg_cluster_ctaid_x(),
__nvvm_read_ptx_sreg_cluster_ctaid_y(),
__nvvm_read_ptx_sreg_cluster_ctaid_z());
}
__device__ inline dim3 __clusterGridDimInClusters() {
return dim3(__nvvm_read_ptx_sreg_nclusterid_x(),
__nvvm_read_ptx_sreg_nclusterid_y(),
__nvvm_read_ptx_sreg_nclusterid_z());
}
__device__ inline dim3 __clusterIdx() {
return dim3(__nvvm_read_ptx_sreg_clusterid_x(),
__nvvm_read_ptx_sreg_clusterid_y(),
__nvvm_read_ptx_sreg_clusterid_z());
}
__device__ inline unsigned __clusterRelativeBlockRank() {
return __nvvm_read_ptx_sreg_cluster_ctarank();
}
__device__ inline unsigned __clusterSizeInBlocks() {
return __nvvm_read_ptx_sreg_cluster_nctarank();
}
__device__ inline void __cluster_barrier_arrive() {
__nvvm_barrier_cluster_arrive();
}
__device__ inline void __cluster_barrier_arrive_relaxed() {
__nvvm_barrier_cluster_arrive_relaxed();
}
__device__ inline void __cluster_barrier_wait() {
__nvvm_barrier_cluster_wait();
}
__device__ inline void __threadfence_cluster() { __nvvm_fence_sc_cluster(); }
__device__ inline float2 atomicAdd(float2 *__ptr, float2 __val) {
float2 __ret;
__asm__("atom.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
: "=f"(__ret.x), "=f"(__ret.y)
: "l"(__ptr), "f"(__val.x), "f"(__val.y));
return __ret;
}
__device__ inline float2 atomicAdd_block(float2 *__ptr, float2 __val) {
float2 __ret;
__asm__("atom.cta.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
: "=f"(__ret.x), "=f"(__ret.y)
: "l"(__ptr), "f"(__val.x), "f"(__val.y));
return __ret;
}
__device__ inline float2 atomicAdd_system(float2 *__ptr, float2 __val) {
float2 __ret;
__asm__("atom.sys.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
: "=f"(__ret.x), "=f"(__ret.y)
: "l"(__ptr), "f"(__val.x), "f"(__val.y));
return __ret;
}
__device__ inline float4 atomicAdd(float4 *__ptr, float4 __val) {
float4 __ret;
__asm__("atom.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
: "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
: "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
return __ret;
}
__device__ inline float4 atomicAdd_block(float4 *__ptr, float4 __val) {
float4 __ret;
__asm__(
"atom.cta.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
: "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
: "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
return __ret;
}
__device__ inline float4 atomicAdd_system(float4 *__ptr, float4 __val) {
float4 __ret;
__asm__(
"atom.sys.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
: "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
: "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w)
:);
return __ret;
}
#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
#endif // CUDA_VERSION >= 11000
#endif // defined(__CLANG_CUDA_INTRINSICS_H__)
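
The __reduce_*_sync wrappers added above map directly onto the sm_80 redux.sync family of PTX instructions. A minimal usage sketch (not part of this commit; assumes full warps on sm_80 or newer):

__global__ void warp_sum(const unsigned *in, unsigned *out) {
  unsigned v = in[threadIdx.x];
  // All 32 lanes participate; every lane receives the warp-wide sum.
  unsigned total = __reduce_add_sync(0xffffffffu, v);
  if ((threadIdx.x & 31) == 0)
    out[threadIdx.x >> 5] = total;
}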
__clang_hip_cmath.h  +1 -1
@@ -171,7 +171,7 @@ __DEVICE__ __CONSTEXPR__ bool signbit(double __x) { return ::__signbit(__x); }
// Other functions.
__DEVICE__ __CONSTEXPR__ _Float16 fma(_Float16 __x, _Float16 __y,
_Float16 __z) {
return __ocml_fma_f16(__x, __y, __z);
return __builtin_fmaf16(__x, __y, __z);
}
__DEVICE__ __CONSTEXPR__ _Float16 pow(_Float16 __base, int __iexp) {
return __ocml_pown_f16(__base, __iexp);
__clang_hip_libdevice_declares.h  +34 -32
@@ -10,6 +10,10 @@
#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
#define __CLANG_HIP_LIBDEVICE_DECLARES_H__
#if !defined(__HIPCC_RTC__) && __has_include("hip/hip_version.h")
#include "hip/hip_version.h"
#endif // __has_include("hip/hip_version.h")
#ifdef __cplusplus
extern "C" {
#endif
@@ -137,23 +141,6 @@ __device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float);
__device__ inline __attribute__((const)) float
__llvm_amdgcn_cos_f32(float __x) {
return __builtin_amdgcn_cosf(__x);
}
__device__ inline __attribute__((const)) float
__llvm_amdgcn_rcp_f32(float __x) {
return __builtin_amdgcn_rcpf(__x);
}
__device__ inline __attribute__((const)) float
__llvm_amdgcn_rsq_f32(float __x) {
return __builtin_amdgcn_rsqf(__x);
}
__device__ inline __attribute__((const)) float
__llvm_amdgcn_sin_f32(float __x) {
return __builtin_amdgcn_sinf(__x);
}
// END INTRINSICS
// END FLOAT
@@ -277,15 +264,6 @@ __device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double,
__device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double,
double);
__device__ inline __attribute__((const)) double
__llvm_amdgcn_rcp_f64(double __x) {
return __builtin_amdgcn_rcp(__x);
}
__device__ inline __attribute__((const)) double
__llvm_amdgcn_rsq_f64(double __x) {
return __builtin_amdgcn_rsq(__x);
}
__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16);
__device__ _Float16 __ocml_cos_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_cvtrtn_f16_f32(float);
@@ -305,7 +283,6 @@ __device__ __attribute__((const)) int __ocml_isnan_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16);
__device__ __attribute__((const)) _Float16 __llvm_amdgcn_rcp_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16);
__device__ _Float16 __ocml_sin_f16(_Float16);
@@ -316,8 +293,15 @@ __device__ __attribute__((pure)) _Float16 __ocml_pown_f16(_Float16, int);
typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
typedef short __2i16 __attribute__((ext_vector_type(2)));
// We need to match C99's bool and get an i1 in the IR.
#ifdef __cplusplus
typedef bool __ockl_bool;
#else
typedef _Bool __ockl_bool;
#endif
__device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b,
float c, bool s);
float c, __ockl_bool s);
__device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16);
__device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16);
__device__ __2f16 __ocml_cos_2f16(__2f16);
@@ -332,11 +316,29 @@ __device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);
__device__ inline __2f16
__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
{
return (__2f16)(__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y));
#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 560
#define __DEPRECATED_SINCE_HIP_560(X) __attribute__((deprecated(X)))
#else
#define __DEPRECATED_SINCE_HIP_560(X)
#endif
// Deprecated, should be removed when rocm releases using it are no longer
// relevant.
__DEPRECATED_SINCE_HIP_560("use ((_Float16)1.0) / ")
__device__ inline _Float16 __llvm_amdgcn_rcp_f16(_Float16 x) {
return ((_Float16)1.0f) / x;
}
__DEPRECATED_SINCE_HIP_560("use ((__2f16)1.0) / ")
__device__ inline __2f16
__llvm_amdgcn_rcp_2f16(__2f16 __x)
{
return ((__2f16)1.0f) / __x;
}
#undef __DEPRECATED_SINCE_HIP_560
__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
__device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
__device__ __2f16 __ocml_sin_2f16(__2f16);
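
The two __llvm_amdgcn_rcp shims above now expand to a plain division and carry a version-gated deprecation attribute. A hypothetical device-code fragment (names illustrative) showing what callers see when building against HIP 5.6 or newer:

__device__ _Float16 old_rcp(_Float16 x) {
  return __llvm_amdgcn_rcp_f16(x); // -Wdeprecated-declarations: use ((_Float16)1.0) /
}
__device__ _Float16 new_rcp(_Float16 x) {
  return (_Float16)1.0f / x; // the suggested drop-in replacement
}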
__clang_hip_math.h  +56 -71
@@ -182,10 +182,10 @@ __DEVICE__
float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
__DEVICE__
float ceilf(float __x) { return __ocml_ceil_f32(__x); }
float ceilf(float __x) { return __builtin_ceilf(__x); }
__DEVICE__
float copysignf(float __x, float __y) { return __ocml_copysign_f32(__x, __y); }
float copysignf(float __x, float __y) { return __builtin_copysignf(__x, __y); }
__DEVICE__
float cosf(float __x) { return __ocml_cos_f32(__x); }
@@ -221,10 +221,10 @@ __DEVICE__
float exp10f(float __x) { return __ocml_exp10_f32(__x); }
__DEVICE__
float exp2f(float __x) { return __ocml_exp2_f32(__x); }
float exp2f(float __x) { return __builtin_exp2f(__x); }
__DEVICE__
float expf(float __x) { return __ocml_exp_f32(__x); }
float expf(float __x) { return __builtin_expf(__x); }
__DEVICE__
float expm1f(float __x) { return __ocml_expm1_f32(__x); }
@@ -239,33 +239,25 @@ __DEVICE__
float fdividef(float __x, float __y) { return __x / __y; }
__DEVICE__
float floorf(float __x) { return __ocml_floor_f32(__x); }
float floorf(float __x) { return __builtin_floorf(__x); }
__DEVICE__
float fmaf(float __x, float __y, float __z) {
return __ocml_fma_f32(__x, __y, __z);
return __builtin_fmaf(__x, __y, __z);
}
__DEVICE__
float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
float fmaxf(float __x, float __y) { return __builtin_fmaxf(__x, __y); }
__DEVICE__
float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
float fminf(float __x, float __y) { return __builtin_fminf(__x, __y); }
__DEVICE__
float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
__DEVICE__
float frexpf(float __x, int *__nptr) {
int __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
float __r =
__ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
*__nptr = __tmp;
return __r;
return __builtin_frexpf(__x, __nptr);
}
__DEVICE__
@@ -275,13 +267,13 @@ __DEVICE__
int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
__DEVICE__
__RETURN_TYPE __finitef(float __x) { return __ocml_isfinite_f32(__x); }
__RETURN_TYPE __finitef(float __x) { return __builtin_isfinite(__x); }
__DEVICE__
__RETURN_TYPE __isinff(float __x) { return __ocml_isinf_f32(__x); }
__RETURN_TYPE __isinff(float __x) { return __builtin_isinf(__x); }
__DEVICE__
__RETURN_TYPE __isnanf(float __x) { return __ocml_isnan_f32(__x); }
__RETURN_TYPE __isnanf(float __x) { return __builtin_isnan(__x); }
__DEVICE__
float j0f(float __x) { return __ocml_j0_f32(__x); }
@@ -311,37 +303,37 @@ float jnf(int __n, float __x) { // TODO: we could use Ahmes multiplication
}
__DEVICE__
float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
float ldexpf(float __x, int __e) { return __builtin_amdgcn_ldexpf(__x, __e); }
__DEVICE__
float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
__DEVICE__
long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
long long int llrintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
long long int llroundf(float __x) { return __ocml_round_f32(__x); }
long long int llroundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
float log10f(float __x) { return __ocml_log10_f32(__x); }
float log10f(float __x) { return __builtin_log10f(__x); }
__DEVICE__
float log1pf(float __x) { return __ocml_log1p_f32(__x); }
__DEVICE__
float log2f(float __x) { return __ocml_log2_f32(__x); }
float log2f(float __x) { return __builtin_log2f(__x); }
__DEVICE__
float logbf(float __x) { return __ocml_logb_f32(__x); }
__DEVICE__
float logf(float __x) { return __ocml_log_f32(__x); }
float logf(float __x) { return __builtin_logf(__x); }
__DEVICE__
long int lrintf(float __x) { return __ocml_rint_f32(__x); }
long int lrintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
long int lroundf(float __x) { return __ocml_round_f32(__x); }
long int lroundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
float modff(float __x, float *__iptr) {
@@ -377,7 +369,7 @@ float nanf(const char *__tagp __attribute__((nonnull))) {
}
__DEVICE__
float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
float nearbyintf(float __x) { return __builtin_nearbyintf(__x); }
__DEVICE__
float nextafterf(float __x, float __y) {
@@ -443,7 +435,7 @@ __DEVICE__
float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); }
__DEVICE__
float rintf(float __x) { return __ocml_rint_f32(__x); }
float rintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
float rnorm3df(float __x, float __y, float __z) {
@@ -468,22 +460,22 @@ float rnormf(int __dim,
}
__DEVICE__
float roundf(float __x) { return __ocml_round_f32(__x); }
float roundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
__DEVICE__
float scalblnf(float __x, long int __n) {
return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
return (__n < INT_MAX) ? __builtin_amdgcn_ldexpf(__x, __n)
: __ocml_scalb_f32(__x, __n);
}
__DEVICE__
float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
float scalbnf(float __x, int __n) { return __builtin_amdgcn_ldexpf(__x, __n); }
__DEVICE__
__RETURN_TYPE __signbitf(float __x) { return __ocml_signbit_f32(__x); }
__RETURN_TYPE __signbitf(float __x) { return __builtin_signbitf(__x); }
__DEVICE__
void sincosf(float __x, float *__sinptr, float *__cosptr) {
@@ -529,7 +521,7 @@ __DEVICE__
float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
__DEVICE__
float truncf(float __x) { return __ocml_trunc_f32(__x); }
float truncf(float __x) { return __builtin_truncf(__x); }
__DEVICE__
float y0f(float __x) { return __ocml_y0_f32(__x); }
@@ -621,7 +613,7 @@ float __fmaf_rz(float __x, float __y, float __z) {
#else
__DEVICE__
float __fmaf_rn(float __x, float __y, float __z) {
return __ocml_fma_f32(__x, __y, __z);
return __builtin_fmaf(__x, __y, __z);
}
#endif
@@ -654,7 +646,7 @@ float __frcp_rn(float __x) { return 1.0f / __x; }
#endif
__DEVICE__
float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
float __frsqrt_rn(float __x) { return __builtin_amdgcn_rsqf(__x); }
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
@@ -739,11 +731,11 @@ __DEVICE__
double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
__DEVICE__
double ceil(double __x) { return __ocml_ceil_f64(__x); }
double ceil(double __x) { return __builtin_ceil(__x); }
__DEVICE__
double copysign(double __x, double __y) {
return __ocml_copysign_f64(__x, __y);
return __builtin_copysign(__x, __y);
}
__DEVICE__
@@ -795,32 +787,25 @@ __DEVICE__
double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
__DEVICE__
double floor(double __x) { return __ocml_floor_f64(__x); }
double floor(double __x) { return __builtin_floor(__x); }
__DEVICE__
double fma(double __x, double __y, double __z) {
return __ocml_fma_f64(__x, __y, __z);
return __builtin_fma(__x, __y, __z);
}
__DEVICE__
double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
double fmax(double __x, double __y) { return __builtin_fmax(__x, __y); }
__DEVICE__
double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
double fmin(double __x, double __y) { return __builtin_fmin(__x, __y); }
__DEVICE__
double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
__DEVICE__
double frexp(double __x, int *__nptr) {
int __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
double __r =
__ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
*__nptr = __tmp;
return __r;
return __builtin_frexp(__x, __nptr);
}
__DEVICE__
@@ -830,13 +815,13 @@ __DEVICE__
int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
__DEVICE__
__RETURN_TYPE __finite(double __x) { return __ocml_isfinite_f64(__x); }
__RETURN_TYPE __finite(double __x) { return __builtin_isfinite(__x); }
__DEVICE__
__RETURN_TYPE __isinf(double __x) { return __ocml_isinf_f64(__x); }
__RETURN_TYPE __isinf(double __x) { return __builtin_isinf(__x); }
__DEVICE__
__RETURN_TYPE __isnan(double __x) { return __ocml_isnan_f64(__x); }
__RETURN_TYPE __isnan(double __x) { return __builtin_isnan(__x); }
__DEVICE__
double j0(double __x) { return __ocml_j0_f64(__x); }
@@ -866,16 +851,16 @@ double jn(int __n, double __x) { // TODO: we could use Ahmes multiplication
}
__DEVICE__
double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
double ldexp(double __x, int __e) { return __builtin_amdgcn_ldexp(__x, __e); }
__DEVICE__
double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
__DEVICE__
long long int llrint(double __x) { return __ocml_rint_f64(__x); }
long long int llrint(double __x) { return __builtin_rint(__x); }
__DEVICE__
long long int llround(double __x) { return __ocml_round_f64(__x); }
long long int llround(double __x) { return __builtin_round(__x); }
__DEVICE__
double log(double __x) { return __ocml_log_f64(__x); }
@@ -893,10 +878,10 @@ __DEVICE__
double logb(double __x) { return __ocml_logb_f64(__x); }
__DEVICE__
long int lrint(double __x) { return __ocml_rint_f64(__x); }
long int lrint(double __x) { return __builtin_rint(__x); }
__DEVICE__
long int lround(double __x) { return __ocml_round_f64(__x); }
long int lround(double __x) { return __builtin_round(__x); }
__DEVICE__
double modf(double __x, double *__iptr) {
@@ -940,7 +925,7 @@ double nan(const char *__tagp) {
}
__DEVICE__
double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
double nearbyint(double __x) { return __builtin_nearbyint(__x); }
__DEVICE__
double nextafter(double __x, double __y) {
@@ -1006,7 +991,7 @@ __DEVICE__
double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); }
__DEVICE__
double rint(double __x) { return __ocml_rint_f64(__x); }
double rint(double __x) { return __builtin_rint(__x); }
__DEVICE__
double rnorm(int __dim,
@@ -1031,21 +1016,21 @@ double rnorm4d(double __x, double __y, double __z, double __w) {
}
__DEVICE__
double round(double __x) { return __ocml_round_f64(__x); }
double round(double __x) { return __builtin_round(__x); }
__DEVICE__
double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
__DEVICE__
double scalbln(double __x, long int __n) {
return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
return (__n < INT_MAX) ? __builtin_amdgcn_ldexp(__x, __n)
: __ocml_scalb_f64(__x, __n);
}
__DEVICE__
double scalbn(double __x, int __n) { return __ocml_scalbn_f64(__x, __n); }
double scalbn(double __x, int __n) { return __builtin_amdgcn_ldexp(__x, __n); }
__DEVICE__
__RETURN_TYPE __signbit(double __x) { return __ocml_signbit_f64(__x); }
__RETURN_TYPE __signbit(double __x) { return __builtin_signbit(__x); }
__DEVICE__
double sin(double __x) { return __ocml_sin_f64(__x); }
@@ -1091,7 +1076,7 @@ __DEVICE__
double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
__DEVICE__
double trunc(double __x) { return __ocml_trunc_f64(__x); }
double trunc(double __x) { return __builtin_trunc(__x); }
__DEVICE__
double y0(double __x) { return __ocml_y0_f64(__x); }
@@ -1258,7 +1243,7 @@ double __fma_rz(double __x, double __y, double __z) {
#else
__DEVICE__
double __fma_rn(double __x, double __y, double __z) {
return __ocml_fma_f64(__x, __y, __z);
return __builtin_fma(__x, __y, __z);
}
#endif
// END INTRINSICS
@@ -1290,16 +1275,16 @@ __DEVICE__ int max(int __arg1, int __arg2) {
}
__DEVICE__
float max(float __x, float __y) { return fmaxf(__x, __y); }
float max(float __x, float __y) { return __builtin_fmaxf(__x, __y); }
__DEVICE__
double max(double __x, double __y) { return fmax(__x, __y); }
double max(double __x, double __y) { return __builtin_fmax(__x, __y); }
__DEVICE__
float min(float __x, float __y) { return fminf(__x, __y); }
float min(float __x, float __y) { return __builtin_fminf(__x, __y); }
__DEVICE__
double min(double __x, double __y) { return fmin(__x, __y); }
double min(double __x, double __y) { return __builtin_fmin(__x, __y); }
#if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
__host__ inline static int min(int __arg1, int __arg2) {
__clang_hip_runtime_wrapper.h  +13
@@ -80,12 +80,25 @@ extern "C" {
#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 405
extern "C" __device__ unsigned long long __ockl_dm_alloc(unsigned long long __size);
extern "C" __device__ void __ockl_dm_dealloc(unsigned long long __addr);
#if __has_feature(address_sanitizer)
extern "C" __device__ unsigned long long __asan_malloc_impl(unsigned long long __size, unsigned long long __pc);
extern "C" __device__ void __asan_free_impl(unsigned long long __addr, unsigned long long __pc);
__attribute__((noinline, weak)) __device__ void *malloc(__hip_size_t __size) {
unsigned long long __pc = (unsigned long long)__builtin_return_address(0);
return (void *)__asan_malloc_impl(__size, __pc);
}
__attribute__((noinline, weak)) __device__ void free(void *__ptr) {
unsigned long long __pc = (unsigned long long)__builtin_return_address(0);
__asan_free_impl((unsigned long long)__ptr, __pc);
}
#else
__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
return (void *) __ockl_dm_alloc(__size);
}
__attribute__((weak)) inline __device__ void free(void *__ptr) {
__ockl_dm_dealloc((unsigned long long)__ptr);
}
#endif // __has_feature(address_sanitizer)
#else // HIP version check
#if __HIP_ENABLE_DEVICE_MALLOC__
__device__ void *__hip_malloc(__hip_size_t __size);
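
With the HIP >= 4.5 path above, device-side malloc and free forward to the OCKL device allocator, or to the ASan-instrumented entry points when address_sanitizer is enabled. A hypothetical kernel exercising them (a sketch only; assumes a target where the device heap is available):

__global__ void scratch_demo(int n) {
  int *buf = (int *)malloc(n * sizeof(int)); // __ockl_dm_alloc under the hood
  if (buf) {
    buf[0] = n;
    free(buf); // __ockl_dm_dealloc
  }
}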
adxintrin.h  +179 -24
@@ -17,56 +17,211 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
/* Intrinsics that are available only if __ADX__ defined */
static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
unsigned int *__p)
{
/* Use C++ inline semantics in C++, GNU inline for C mode. */
#if defined(__cplusplus)
#define __INLINE __inline
#else
#define __INLINE static __inline
#endif
#if defined(__cplusplus)
extern "C" {
#endif
/* Intrinsics that are available only if __ADX__ is defined. */
/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated
/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory
/// at \a __p, and returns the 8-bit carry-out (carry flag).
///
/// \code{.operation}
/// temp := (__cf == 0) ? 0 : 1
/// Store32(__p, __x + __y + temp)
/// result := CF
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c ADCX instruction.
///
/// \param __cf
/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
/// \param __x
/// A 32-bit unsigned addend.
/// \param __y
/// A 32-bit unsigned addend.
/// \param __p
/// Pointer to memory for storing the sum.
/// \returns The 8-bit unsigned carry-out value.
__INLINE unsigned char
__attribute__((__always_inline__, __nodebug__, __target__("adx")))
_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
unsigned int *__p) {
return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
}
#ifdef __x86_64__
static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
_addcarryx_u64(unsigned char __cf, unsigned long long __x,
unsigned long long __y, unsigned long long *__p)
{
/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated
/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory
/// at \a __p, and returns the 8-bit carry-out (carry flag).
///
/// \code{.operation}
/// temp := (__cf == 0) ? 0 : 1
/// Store64(__p, __x + __y + temp)
/// result := CF
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c ADCX instruction.
///
/// \param __cf
/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
/// \param __x
/// A 64-bit unsigned addend.
/// \param __y
/// A 64-bit unsigned addend.
/// \param __p
/// Pointer to memory for storing the sum.
/// \returns The 8-bit unsigned carry-out value.
__INLINE unsigned char
__attribute__((__always_inline__, __nodebug__, __target__("adx")))
_addcarryx_u64(unsigned char __cf, unsigned long long __x,
unsigned long long __y, unsigned long long *__p) {
return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
}
#endif
/* Intrinsics that are also available if __ADX__ undefined */
static __inline unsigned char __DEFAULT_FN_ATTRS
_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
unsigned int *__p)
{
/* Intrinsics that are also available if __ADX__ is undefined. */
/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated
/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory
/// at \a __p, and returns the 8-bit carry-out (carry flag).
///
/// \code{.operation}
/// temp := (__cf == 0) ? 0 : 1
/// Store32(__p, __x + __y + temp)
/// result := CF
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c ADC instruction.
///
/// \param __cf
/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
/// \param __x
/// A 32-bit unsigned addend.
/// \param __y
/// A 32-bit unsigned addend.
/// \param __p
/// Pointer to memory for storing the sum.
/// \returns The 8-bit unsigned carry-out value.
__INLINE unsigned char __DEFAULT_FN_ATTRS _addcarry_u32(unsigned char __cf,
unsigned int __x,
unsigned int __y,
unsigned int *__p) {
return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
}
#ifdef __x86_64__
static __inline unsigned char __DEFAULT_FN_ATTRS
/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated
/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory
/// at \a __p, and returns the 8-bit carry-out (carry flag).
///
/// \code{.operation}
/// temp := (__cf == 0) ? 0 : 1
/// Store64(__p, __x + __y + temp)
/// result := CF
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c ADC instruction.
///
/// \param __cf
/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
/// \param __x
/// A 64-bit unsigned addend.
/// \param __y
/// A 64-bit unsigned addend.
/// \param __p
/// Pointer to memory for storing the sum.
/// \returns The 8-bit unsigned carry-out value.
__INLINE unsigned char __DEFAULT_FN_ATTRS
_addcarry_u64(unsigned char __cf, unsigned long long __x,
unsigned long long __y, unsigned long long *__p)
{
unsigned long long __y, unsigned long long *__p) {
return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
}
#endif
static __inline unsigned char __DEFAULT_FN_ATTRS
_subborrow_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
unsigned int *__p)
{
/// Adds unsigned 32-bit integer \a __y to 0 or 1 as indicated by the carry
/// flag \a __cf, and subtracts the result from unsigned 32-bit integer
/// \a __x. Stores the unsigned 32-bit difference in the memory at \a __p,
/// and returns the 8-bit carry-out (carry or overflow flag).
///
/// \code{.operation}
/// temp := (__cf == 0) ? 0 : 1
/// Store32(__p, __x - (__y + temp))
/// result := CF
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c SBB instruction.
///
/// \param __cf
/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
/// \param __x
/// The 32-bit unsigned minuend.
/// \param __y
/// The 32-bit unsigned subtrahend.
/// \param __p
/// Pointer to memory for storing the difference.
/// \returns The 8-bit unsigned carry-out value.
__INLINE unsigned char __DEFAULT_FN_ATTRS _subborrow_u32(unsigned char __cf,
unsigned int __x,
unsigned int __y,
unsigned int *__p) {
return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);
}
#ifdef __x86_64__
static __inline unsigned char __DEFAULT_FN_ATTRS
/// Adds unsigned 64-bit integer \a __y to 0 or 1 as indicated by the carry
/// flag \a __cf, and subtracts the result from unsigned 64-bit integer
/// \a __x. Stores the unsigned 64-bit difference in the memory at \a __p,
/// and returns the 8-bit carry-out (carry or overflow flag).
///
/// \code{.operation}
/// temp := (__cf == 0) ? 0 : 1
/// Store64(__p, __x - (__y + temp))
/// result := CF
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c SBB instruction.
///
/// \param __cf
/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
/// \param __x
/// The 64-bit unsigned minuend.
/// \param __y
/// The 64-bit unsigned subtrahend.
/// \param __p
/// Pointer to memory for storing the difference.
/// \returns The 8-bit unsigned carry-out value.
__INLINE unsigned char __DEFAULT_FN_ATTRS
_subborrow_u64(unsigned char __cf, unsigned long long __x,
unsigned long long __y, unsigned long long *__p)
{
unsigned long long __y, unsigned long long *__p) {
return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);
}
#endif
#if defined(__cplusplus)
}
#endif
#undef __DEFAULT_FN_ATTRS
#endif /* __ADXINTRIN_H */
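
The carry-in/carry-out signature of these intrinsics exists so wide additions can be chained one limb at a time. A hedged sketch of a 128-bit add built from four 32-bit limbs (little-endian limb order; the helper name is illustrative):

#include <immintrin.h>

void add128(const unsigned int a[4], const unsigned int b[4],
            unsigned int sum[4]) {
  unsigned char cf = 0; // no carry into the lowest limb
  cf = _addcarry_u32(cf, a[0], b[0], &sum[0]);
  cf = _addcarry_u32(cf, a[1], b[1], &sum[1]);
  cf = _addcarry_u32(cf, a[2], b[2], &sum[2]);
  (void)_addcarry_u32(cf, a[3], b[3], &sum[3]); // final carry-out discarded
}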
altivec.h  +136 -124
@@ -3202,71 +3202,79 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
// the XL-compatible signatures are used for those functions.
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_ctf(__a, __b) \
_Generic( \
(__a), vector int \
: (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
vector unsigned int \
: (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
(__b)), \
vector unsigned long long \
: (vector float)(__builtin_vsx_xvcvuxdsp( \
(vector unsigned long long)(__a)) * \
(vector float)(vector unsigned)((0x7f - (__b)) << 23)), \
vector signed long long \
: (vector float)(__builtin_vsx_xvcvsxdsp( \
(vector signed long long)(__a)) * \
(vector float)(vector unsigned)((0x7f - (__b)) << 23)))
_Generic((__a), \
vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
((__b)&0x1F)), \
vector unsigned int: (vector float)__builtin_altivec_vcfux( \
(vector unsigned int)(__a), ((__b)&0x1F)), \
vector unsigned long long: ( \
vector float)(__builtin_vsx_xvcvuxdsp( \
(vector unsigned long long)(__a)) * \
(vector float)(vector unsigned)((0x7f - \
((__b)&0x1F)) \
<< 23)), \
vector signed long long: ( \
vector float)(__builtin_vsx_xvcvsxdsp( \
(vector signed long long)(__a)) * \
(vector float)(vector unsigned)((0x7f - \
((__b)&0x1F)) \
<< 23)))
#else // __XL_COMPAT_ALTIVEC__
#define vec_ctf(__a, __b) \
_Generic( \
(__a), vector int \
: (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
vector unsigned int \
: (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
(__b)), \
vector unsigned long long \
: (vector float)(__builtin_convertvector( \
(vector unsigned long long)(__a), vector double) * \
(vector double)(vector unsigned long long)((0x3ffULL - \
(__b)) \
<< 52)), \
vector signed long long \
: (vector float)(__builtin_convertvector((vector signed long long)(__a), \
vector double) * \
(vector double)(vector unsigned long long)((0x3ffULL - \
(__b)) \
<< 52)))
#define vec_ctf(__a, __b) \
_Generic( \
(__a), \
vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
((__b)&0x1F)), \
vector unsigned int: (vector float)__builtin_altivec_vcfux( \
(vector unsigned int)(__a), ((__b)&0x1F)), \
vector unsigned long long: ( \
vector float)(__builtin_convertvector( \
(vector unsigned long long)(__a), vector double) * \
(vector double)(vector unsigned long long)((0x3ffULL - \
((__b)&0x1F)) \
<< 52)), \
vector signed long long: ( \
vector float)(__builtin_convertvector( \
(vector signed long long)(__a), vector double) * \
(vector double)(vector unsigned long long)((0x3ffULL - \
((__b)&0x1F)) \
<< 52)))
#endif // __XL_COMPAT_ALTIVEC__
#else
#define vec_ctf(__a, __b) \
_Generic((__a), vector int \
: (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
vector unsigned int \
: (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
(__b)))
_Generic((__a), \
vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
((__b)&0x1F)), \
vector unsigned int: (vector float)__builtin_altivec_vcfux( \
(vector unsigned int)(__a), ((__b)&0x1F)))
#endif
/* vec_ctd */
#ifdef __VSX__
#define vec_ctd(__a, __b) \
_Generic((__a), vector signed int \
: (vec_doublee((vector signed int)(__a)) * \
(vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
<< 52)), \
vector unsigned int \
: (vec_doublee((vector unsigned int)(__a)) * \
(vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
<< 52)), \
vector unsigned long long \
: (__builtin_convertvector((vector unsigned long long)(__a), \
vector double) * \
(vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
<< 52)), \
vector signed long long \
: (__builtin_convertvector((vector signed long long)(__a), \
vector double) * \
(vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
<< 52)))
_Generic((__a), \
vector signed int: ( \
vec_doublee((vector signed int)(__a)) * \
(vector double)(vector unsigned long long)((0x3ffULL - \
((__b)&0x1F)) \
<< 52)), \
vector unsigned int: ( \
vec_doublee((vector unsigned int)(__a)) * \
(vector double)(vector unsigned long long)((0x3ffULL - \
((__b)&0x1F)) \
<< 52)), \
vector unsigned long long: ( \
__builtin_convertvector((vector unsigned long long)(__a), \
vector double) * \
(vector double)(vector unsigned long long)((0x3ffULL - \
((__b)&0x1F)) \
<< 52)), \
vector signed long long: ( \
__builtin_convertvector((vector signed long long)(__a), \
vector double) * \
(vector double)(vector unsigned long long)((0x3ffULL - \
((__b)&0x1F)) \
<< 52)))
#endif // __VSX__
/* vec_vcfsx */
@@ -3281,27 +3289,27 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_cts(__a, __b) \
_Generic((__a), vector float \
: (vector signed int)__builtin_altivec_vctsxs((vector float)(__a), \
(__b)), \
vector double \
: __extension__({ \
_Generic((__a), \
vector float: (vector signed int)__builtin_altivec_vctsxs( \
(vector float)(__a), ((__b)&0x1F)), \
vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
(vector double)(vector unsigned long long)((0x3ffULL + \
((__b)&0x1F)) \
<< 52); \
(vector signed long long)__builtin_vsx_xvcvdpsxws(__ret); \
}))
#else // __XL_COMPAT_ALTIVEC__
#define vec_cts(__a, __b) \
_Generic((__a), vector float \
: (vector signed int)__builtin_altivec_vctsxs((vector float)(__a), \
(__b)), \
vector double \
: __extension__({ \
_Generic((__a), \
vector float: (vector signed int)__builtin_altivec_vctsxs( \
(vector float)(__a), ((__b)&0x1F)), \
vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
(vector double)(vector unsigned long long)((0x3ffULL + \
((__b)&0x1F)) \
<< 52); \
(vector signed long long)__builtin_convertvector( \
__ret, vector signed long long); \
@@ -3320,27 +3328,27 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_ctu(__a, __b) \
_Generic((__a), vector float \
: (vector unsigned int)__builtin_altivec_vctuxs( \
(vector float)(__a), (__b)), \
vector double \
: __extension__({ \
_Generic((__a), \
vector float: (vector unsigned int)__builtin_altivec_vctuxs( \
(vector float)(__a), ((__b)&0x1F)), \
vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + __b) \
(vector double)(vector unsigned long long)((0x3ffULL + \
((__b)&0x1F)) \
<< 52); \
(vector unsigned long long)__builtin_vsx_xvcvdpuxws(__ret); \
}))
#else // __XL_COMPAT_ALTIVEC__
#define vec_ctu(__a, __b) \
_Generic((__a), vector float \
: (vector unsigned int)__builtin_altivec_vctuxs( \
(vector float)(__a), (__b)), \
vector double \
: __extension__({ \
_Generic((__a), \
vector float: (vector unsigned int)__builtin_altivec_vctuxs( \
(vector float)(__a), ((__b)&0x1F)), \
vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + __b) \
(vector double)(vector unsigned long long)((0x3ffULL + \
((__b)&0x1F)) \
<< 52); \
(vector unsigned long long)__builtin_convertvector( \
__ret, vector unsigned long long); \
@@ -3355,60 +3363,62 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#define vec_ctsl(__a, __b) \
_Generic((__a), vector float \
: __extension__({ \
vector float __ret = \
(vector float)(__a) * \
(vector float)(vector unsigned)((0x7f + (__b)) << 23); \
__builtin_vsx_xvcvspsxds( \
__builtin_vsx_xxsldwi(__ret, __ret, 1)); \
}), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + __b) \
<< 52); \
__builtin_convertvector(__ret, vector signed long long); \
}))
_Generic( \
(__a), vector float \
: __extension__({ \
vector float __ret = \
(vector float)(__a) * \
(vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
__builtin_vsx_xvcvspsxds(__builtin_vsx_xxsldwi(__ret, __ret, 1)); \
}), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + \
((__b)&0x1F)) \
<< 52); \
__builtin_convertvector(__ret, vector signed long long); \
}))
/* vec_ctul */
#define vec_ctul(__a, __b) \
_Generic((__a), vector float \
: __extension__({ \
vector float __ret = \
(vector float)(__a) * \
(vector float)(vector unsigned)((0x7f + (__b)) << 23); \
__builtin_vsx_xvcvspuxds( \
__builtin_vsx_xxsldwi(__ret, __ret, 1)); \
}), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + __b) \
<< 52); \
__builtin_convertvector(__ret, vector unsigned long long); \
}))
_Generic( \
(__a), vector float \
: __extension__({ \
vector float __ret = \
(vector float)(__a) * \
(vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
__builtin_vsx_xvcvspuxds(__builtin_vsx_xxsldwi(__ret, __ret, 1)); \
}), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + \
((__b)&0x1F)) \
<< 52); \
__builtin_convertvector(__ret, vector unsigned long long); \
}))
#endif
#else // __LITTLE_ENDIAN__
/* vec_ctsl */
#ifdef __VSX__
#define vec_ctsl(__a, __b) \
_Generic((__a), vector float \
: __extension__({ \
vector float __ret = \
(vector float)(__a) * \
(vector float)(vector unsigned)((0x7f + (__b)) << 23); \
__builtin_vsx_xvcvspsxds(__ret); \
}), \
vector double \
: __extension__({ \
_Generic((__a), \
vector float: __extension__({ \
vector float __ret = \
(vector float)(__a) * \
(vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
__builtin_vsx_xvcvspsxds(__ret); \
}), \
vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + __b) \
(vector double)(vector unsigned long long)((0x3ffULL + \
((__b)&0x1F)) \
<< 52); \
__builtin_convertvector(__ret, vector signed long long); \
}))
@@ -3420,14 +3430,16 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
: __extension__({ \
vector float __ret = \
(vector float)(__a) * \
(vector float)(vector unsigned)((0x7f + (__b)) << 23); \
(vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) \
<< 23); \
__builtin_vsx_xvcvspuxds(__ret); \
}), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + __b) \
(vector double)(vector unsigned long long)((0x3ffULL + \
((__b)&0x1F)) \
<< 52); \
__builtin_convertvector(__ret, vector unsigned long long); \
}))
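
The recurring (__b)&0x1F change clamps the scale argument to the 5-bit immediate range the underlying instructions accept, instead of passing an out-of-range literal through to the builtin. Call sites are unchanged; a hedged sketch (assumes a PowerPC target compiled with AltiVec enabled):

#include <altivec.h>

vector float q5_to_float(vector int fixed_q5) {
  // Interprets each element as Q5 fixed point: result[i] = fixed_q5[i] / 32.0f
  return vec_ctf(fixed_q5, 5);
}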
amxcomplexintrin.h  +169
@@ -0,0 +1,169 @@
/*===--------- amxcomplexintrin.h - AMXCOMPLEX intrinsics -*- C++ -*---------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===------------------------------------------------------------------------===
*/
#ifndef __IMMINTRIN_H
#error "Never use <amxcomplexintrin.h> directly; include <immintrin.h> instead."
#endif // __IMMINTRIN_H
#ifndef __AMX_COMPLEXINTRIN_H
#define __AMX_COMPLEXINTRIN_H
#ifdef __x86_64__
#define __DEFAULT_FN_ATTRS_COMPLEX \
__attribute__((__always_inline__, __nodebug__, __target__("amx-complex")))
/// Perform matrix multiplication of two tiles containing complex elements and
/// accumulate the results into a packed single precision tile. Each dword
/// element in input tiles \a a and \a b is interpreted as a complex number
/// with FP16 real part and FP16 imaginary part.
/// Calculates the imaginary part of the result. For each possible combination
/// of (row of \a a, column of \a b), it performs a set of multiplication
/// and accumulations on all corresponding complex numbers (one from \a a
/// and one from \a b). The imaginary part of the \a a element is multiplied
/// with the real part of the corresponding \a b element, and the real part
/// of the \a a element is multiplied with the imaginary part of the
/// corresponding \a b elements. The two accumulated results are added, and
/// then accumulated into the corresponding row and column of \a dst.
///
/// \headerfile <x86intrin.h>
///
/// \code
/// void _tile_cmmimfp16ps(__tile dst, __tile a, __tile b);
/// \endcode
///
/// \code{.operation}
/// FOR m := 0 TO dst.rows - 1
/// tmp := dst.row[m]
/// FOR k := 0 TO (a.colsb / 4) - 1
/// FOR n := 0 TO (dst.colsb / 4) - 1
/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+1])
/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+0])
/// ENDFOR
/// ENDFOR
/// write_row_and_zero(dst, m, tmp, dst.colsb)
/// ENDFOR
/// zero_upper_rows(dst, dst.rows)
/// zero_tileconfig_start()
/// \endcode
///
/// This intrinsic corresponds to the \c TCMMIMFP16PS instruction.
///
/// \param dst
/// The destination tile. Max size is 1024 Bytes.
/// \param a
/// The 1st source tile. Max size is 1024 Bytes.
/// \param b
/// The 2nd source tile. Max size is 1024 Bytes.
#define _tile_cmmimfp16ps(dst, a, b) __builtin_ia32_tcmmimfp16ps(dst, a, b)
/// Perform matrix multiplication of two tiles containing complex elements and
/// accumulate the results into a packed single precision tile. Each dword
/// element in input tiles \a a and \a b is interpreted as a complex number
/// with FP16 real part and FP16 imaginary part.
/// Calculates the real part of the result. For each possible combination
/// of (row of \a a, column of \a b), it performs a set of multiplication
/// and accumulations on all corresponding complex numbers (one from \a a
/// and one from \a b). The real part of the \a a element is multiplied
/// with the real part of the corresponding \a b element, and the negated
/// imaginary part of the \a a element is multiplied with the imaginary
/// part of the corresponding \a b elements. The two accumulated results
/// are added, and then accumulated into the corresponding row and column
/// of \a dst.
///
/// \headerfile <x86intrin.h>
///
/// \code
/// void _tile_cmmrlfp16ps(__tile dst, __tile a, __tile b);
/// \endcode
///
/// \code{.operation}
/// FOR m := 0 TO dst.rows - 1
/// tmp := dst.row[m]
/// FOR k := 0 TO (a.colsb / 4) - 1
/// FOR n := 0 TO (dst.colsb / 4) - 1
/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+0])
/// tmp.fp32[n] += FP32(-a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+1])
/// ENDFOR
/// ENDFOR
/// write_row_and_zero(dst, m, tmp, dst.colsb)
/// ENDFOR
/// zero_upper_rows(dst, dst.rows)
/// zero_tileconfig_start()
/// \endcode
///
/// This intrinsic corresponds to the \c TCMMRLFP16PS instruction.
///
/// \param dst
/// The destination tile. Max size is 1024 Bytes.
/// \param a
/// The 1st source tile. Max size is 1024 Bytes.
/// \param b
/// The 2nd source tile. Max size is 1024 Bytes.
#define _tile_cmmrlfp16ps(dst, a, b) __builtin_ia32_tcmmrlfp16ps(dst, a, b)
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX
_tile_cmmimfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
_tile1024i dst, _tile1024i src1, _tile1024i src2) {
return __builtin_ia32_tcmmimfp16ps_internal(m, n, k, dst, src1, src2);
}
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX
_tile_cmmrlfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
_tile1024i dst, _tile1024i src1, _tile1024i src2) {
return __builtin_ia32_tcmmrlfp16ps_internal(m, n, k, dst, src1, src2);
}
/// Perform matrix multiplication of two tiles containing complex elements and
/// accumulate the results into a packed single precision tile. Each dword
/// element in input tiles src0 and src1 is interpreted as a complex number with
/// FP16 real part and FP16 imaginary part.
/// This function calculates the imaginary part of the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TCMMIMFP16PS </c> instruction.
///
/// \param dst
/// The destination tile. Max size is 1024 Bytes.
/// \param src0
/// The 1st source tile. Max size is 1024 Bytes.
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_COMPLEX
static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0,
__tile1024i src1) {
dst->tile = _tile_cmmimfp16ps_internal(src0.row, src1.col, src0.col,
dst->tile, src0.tile, src1.tile);
}
/// Perform matrix multiplication of two tiles containing complex elements and
/// accumulate the results into a packed single precision tile. Each dword
/// element in input tiles src0 and src1 is interpreted as a complex number with
/// FP16 real part and FP16 imaginary part.
/// This function calculates the real part of the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TCMMRLFP16PS </c> instruction.
///
/// \param dst
/// The destination tile. Max size is 1024 Bytes.
/// \param src0
/// The 1st source tile. Max size is 1024 Bytes.
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_COMPLEX
static void __tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0,
__tile1024i src1) {
dst->tile = _tile_cmmrlfp16ps_internal(src0.row, src1.col, src0.col,
dst->tile, src0.tile, src1.tile);
}
#endif // __x86_64__
#endif // __AMX_COMPLEXINTRIN_H
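
Per the pseudocode above, each output element accumulates cross terms of FP16 complex products, widened to FP32. A scalar reference model of the per-pair arithmetic (illustrative only; the real instructions apply this across whole tile rows and columns):

// For complex values a = ar + ai*i and b = br + bi*i:
float cmm_im(float ar, float ai, float br, float bi) {
  return ar * bi + ai * br; // what TCMMIMFP16PS accumulates
}
float cmm_rl(float ar, float ai, float br, float bi) {
  return ar * br - ai * bi; // what TCMMRLFP16PS accumulates
}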
arm_acle.h  +13 -9
@@ -138,28 +138,32 @@ __rorl(unsigned long __x, uint32_t __y) {
/* CLZ */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clz(uint32_t __t) {
return (uint32_t)__builtin_clz(__t);
return __builtin_arm_clz(__t);
}
static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzl(unsigned long __t) {
return (unsigned long)__builtin_clzl(__t);
#if __SIZEOF_LONG__ == 4
return __builtin_arm_clz(__t);
#else
return __builtin_arm_clz64(__t);
#endif
}
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzll(uint64_t __t) {
return (uint64_t)__builtin_clzll(__t);
return __builtin_arm_clz64(__t);
}
/* CLS */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__cls(uint32_t __t) {
return __builtin_arm_cls(__t);
}
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
return __builtin_arm_cls(__t);
@@ -168,7 +172,7 @@ __clsl(unsigned long __t) {
#endif
}
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsll(uint64_t __t) {
return __builtin_arm_cls64(__t);
}
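
Switching these wrappers from __builtin_clz to __builtin_arm_clz matters for a zero argument: __builtin_clz(0) is undefined behavior, while the ARM CLZ instruction is defined to return the operand width. A small sketch relying on that (hypothetical helper, not from the header):

#include <arm_acle.h>
#include <stdint.h>

unsigned int bit_width(uint32_t x) {
  return 32u - __clz(x); // __clz(0) == 32, so bit_width(0) == 0
}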
arm_neon.h  +164 -1
@@ -35,7 +35,6 @@
#include <stdint.h>
#include <arm_bf16.h>
typedef __bf16 bfloat16_t;
typedef float float32_t;
typedef __fp16 float16_t;
#ifdef __aarch64__
@@ -64938,6 +64937,170 @@ int8x16_t __reint_786 = __rev2_786; \
})
#endif
#define vldap1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
poly64x1_t __ret; \
poly64x1_t __s1 = __p1; \
__ret = (poly64x1_t) __builtin_neon_vldap1_lane_p64(__p0, (int8x8_t)__s1, __p2, 6); \
__ret; \
})
#ifdef __LITTLE_ENDIAN__
#define vldap1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
poly64x2_t __ret; \
poly64x2_t __s1 = __p1; \
__ret = (poly64x2_t) __builtin_neon_vldap1q_lane_p64(__p0, (int8x16_t)__s1, __p2, 38); \
__ret; \
})
#else
#define vldap1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
poly64x2_t __ret; \
poly64x2_t __s1 = __p1; \
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__ret = (poly64x2_t) __builtin_neon_vldap1q_lane_p64(__p0, (int8x16_t)__rev1, __p2, 38); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vldap1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
uint64x2_t __ret; \
uint64x2_t __s1 = __p1; \
__ret = (uint64x2_t) __builtin_neon_vldap1q_lane_u64(__p0, (int8x16_t)__s1, __p2, 51); \
__ret; \
})
#else
#define vldap1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
uint64x2_t __ret; \
uint64x2_t __s1 = __p1; \
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__ret = (uint64x2_t) __builtin_neon_vldap1q_lane_u64(__p0, (int8x16_t)__rev1, __p2, 51); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vldap1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
float64x2_t __ret; \
float64x2_t __s1 = __p1; \
__ret = (float64x2_t) __builtin_neon_vldap1q_lane_f64(__p0, (int8x16_t)__s1, __p2, 42); \
__ret; \
})
#else
#define vldap1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
float64x2_t __ret; \
float64x2_t __s1 = __p1; \
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__ret = (float64x2_t) __builtin_neon_vldap1q_lane_f64(__p0, (int8x16_t)__rev1, __p2, 42); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vldap1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
int64x2_t __ret; \
int64x2_t __s1 = __p1; \
__ret = (int64x2_t) __builtin_neon_vldap1q_lane_s64(__p0, (int8x16_t)__s1, __p2, 35); \
__ret; \
})
#else
#define vldap1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
int64x2_t __ret; \
int64x2_t __s1 = __p1; \
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__ret = (int64x2_t) __builtin_neon_vldap1q_lane_s64(__p0, (int8x16_t)__rev1, __p2, 35); \
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
__ret; \
})
#endif
#define vldap1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
uint64x1_t __ret; \
uint64x1_t __s1 = __p1; \
__ret = (uint64x1_t) __builtin_neon_vldap1_lane_u64(__p0, (int8x8_t)__s1, __p2, 19); \
__ret; \
})
#define vldap1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
float64x1_t __ret; \
float64x1_t __s1 = __p1; \
__ret = (float64x1_t) __builtin_neon_vldap1_lane_f64(__p0, (int8x8_t)__s1, __p2, 10); \
__ret; \
})
#define vldap1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
int64x1_t __ret; \
int64x1_t __s1 = __p1; \
__ret = (int64x1_t) __builtin_neon_vldap1_lane_s64(__p0, (int8x8_t)__s1, __p2, 3); \
__ret; \
})
#define vstl1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
poly64x1_t __s1 = __p1; \
__builtin_neon_vstl1_lane_p64(__p0, (int8x8_t)__s1, __p2, 6); \
})
#ifdef __LITTLE_ENDIAN__
#define vstl1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
poly64x2_t __s1 = __p1; \
__builtin_neon_vstl1q_lane_p64(__p0, (int8x16_t)__s1, __p2, 38); \
})
#else
#define vstl1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
poly64x2_t __s1 = __p1; \
poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vstl1q_lane_p64(__p0, (int8x16_t)__rev1, __p2, 38); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vstl1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
uint64x2_t __s1 = __p1; \
__builtin_neon_vstl1q_lane_u64(__p0, (int8x16_t)__s1, __p2, 51); \
})
#else
#define vstl1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
uint64x2_t __s1 = __p1; \
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vstl1q_lane_u64(__p0, (int8x16_t)__rev1, __p2, 51); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vstl1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
float64x2_t __s1 = __p1; \
__builtin_neon_vstl1q_lane_f64(__p0, (int8x16_t)__s1, __p2, 42); \
})
#else
#define vstl1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
float64x2_t __s1 = __p1; \
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vstl1q_lane_f64(__p0, (int8x16_t)__rev1, __p2, 42); \
})
#endif
#ifdef __LITTLE_ENDIAN__
#define vstl1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
int64x2_t __s1 = __p1; \
__builtin_neon_vstl1q_lane_s64(__p0, (int8x16_t)__s1, __p2, 35); \
})
#else
#define vstl1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
int64x2_t __s1 = __p1; \
int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
__builtin_neon_vstl1q_lane_s64(__p0, (int8x16_t)__rev1, __p2, 35); \
})
#endif
#define vstl1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
uint64x1_t __s1 = __p1; \
__builtin_neon_vstl1_lane_u64(__p0, (int8x8_t)__s1, __p2, 19); \
})
#define vstl1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
float64x1_t __s1 = __p1; \
__builtin_neon_vstl1_lane_f64(__p0, (int8x8_t)__s1, __p2, 10); \
})
#define vstl1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
int64x1_t __s1 = __p1; \
__builtin_neon_vstl1_lane_s64(__p0, (int8x8_t)__s1, __p2, 3); \
})
#ifdef __LITTLE_ENDIAN__
__ai __attribute__((target("sha3"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
uint8x16_t __ret;
+642
@@ -0,0 +1,642 @@
/*===---- arm_sme_draft_spec_subject_to_change.h - ARM SME intrinsics ------===
*
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __ARM_SME_H
#define __ARM_SME_H
#if !defined(__LITTLE_ENDIAN__)
#error "Big endian is currently not supported for arm_sme_draft_spec_subject_to_change.h"
#endif
#include <arm_sve.h>
/* Function attributes */
#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
#define __aio static __inline__ __attribute__((__always_inline__, __nodebug__, __overloadable__))
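Every declaration that follows is bound to its compiler builtin through __clang_arm_builtin_alias, so a call such as svcntsb() lowers directly to __builtin_sme_svcntsb(); __aio additionally marks the declaration __overloadable__, which is what lets the type-generic names later in the file (svmopa_za32_m and friends) resolve by argument type. The arm_streaming, arm_shared_za, and arm_preserves_za attributes record each intrinsic's streaming-mode and ZA-state contract so the compiler can diagnose misuse.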
#ifdef __cplusplus
extern "C" {
#endif
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_u32_m), arm_streaming, arm_shared_za))
void svaddha_za32_u32_m(uint64_t, svbool_t, svbool_t, svuint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_s32_m), arm_streaming, arm_shared_za))
void svaddha_za32_s32_m(uint64_t, svbool_t, svbool_t, svint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_u32_m), arm_streaming, arm_shared_za))
void svaddva_za32_u32_m(uint64_t, svbool_t, svbool_t, svuint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_s32_m), arm_streaming, arm_shared_za))
void svaddva_za32_s32_m(uint64_t, svbool_t, svbool_t, svint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsb), arm_streaming_compatible, arm_preserves_za))
uint64_t svcntsb(void);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsd), arm_streaming_compatible, arm_preserves_za))
uint64_t svcntsd(void);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsh), arm_streaming_compatible, arm_preserves_za))
uint64_t svcntsh(void);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsw), arm_streaming_compatible, arm_preserves_za))
uint64_t svcntsw(void);
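The svcnts* queries return the streaming vector length (SVL) in bytes, halfwords, words, and doublewords respectively. A small sketch of sizing a computation with them; the wrapper name is illustrative, and it borrows the draft's GNU-style attributes the same way the header does:

#include <arm_sme_draft_spec_subject_to_change.h>

/* Elements per horizontal slice of a 32-bit ZA tile: SVL in words. */
__attribute__((arm_streaming_compatible))
static uint64_t za32_slice_elems(void) {
    return svcntsw();
}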
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za128), arm_streaming, arm_shared_za))
void svld1_hor_vnum_za128(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za16), arm_streaming, arm_shared_za))
void svld1_hor_vnum_za16(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za32), arm_streaming, arm_shared_za))
void svld1_hor_vnum_za32(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za64), arm_streaming, arm_shared_za))
void svld1_hor_vnum_za64(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za8), arm_streaming, arm_shared_za))
void svld1_hor_vnum_za8(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za128), arm_streaming, arm_shared_za))
void svld1_hor_za128(uint64_t, uint32_t, uint64_t, svbool_t, void const *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za16), arm_streaming, arm_shared_za))
void svld1_hor_za16(uint64_t, uint32_t, uint64_t, svbool_t, void const *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za32), arm_streaming, arm_shared_za))
void svld1_hor_za32(uint64_t, uint32_t, uint64_t, svbool_t, void const *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za64), arm_streaming, arm_shared_za))
void svld1_hor_za64(uint64_t, uint32_t, uint64_t, svbool_t, void const *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za8), arm_streaming, arm_shared_za))
void svld1_hor_za8(uint64_t, uint32_t, uint64_t, svbool_t, void const *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za128), arm_streaming, arm_shared_za))
void svld1_ver_vnum_za128(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za16), arm_streaming, arm_shared_za))
void svld1_ver_vnum_za16(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za32), arm_streaming, arm_shared_za))
void svld1_ver_vnum_za32(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za64), arm_streaming, arm_shared_za))
void svld1_ver_vnum_za64(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za8), arm_streaming, arm_shared_za))
void svld1_ver_vnum_za8(uint64_t, uint32_t, uint64_t, svbool_t, void const *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za128), arm_streaming, arm_shared_za))
void svld1_ver_za128(uint64_t, uint32_t, uint64_t, svbool_t, void const *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za16), arm_streaming, arm_shared_za))
void svld1_ver_za16(uint64_t, uint32_t, uint64_t, svbool_t, void const *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za32), arm_streaming, arm_shared_za))
void svld1_ver_za32(uint64_t, uint32_t, uint64_t, svbool_t, void const *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za64), arm_streaming, arm_shared_za))
void svld1_ver_za64(uint64_t, uint32_t, uint64_t, svbool_t, void const *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za8), arm_streaming, arm_shared_za))
void svld1_ver_za8(uint64_t, uint32_t, uint64_t, svbool_t, void const *);
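Reading this draft's positional parameters, the ZA loads take (tile, slice_base, slice_offset, pg, ptr) and fill one horizontal (hor) or vertical (ver) slice of the selected tile; the parameter naming is my interpretation, since the spec is explicitly subject to change. A sketch:

#include <arm_sme_draft_spec_subject_to_change.h>

/* Load row `row` of the single 8-bit ZA tile (tile 0) from memory. */
__attribute__((arm_streaming, arm_shared_za))
static void load_za8_row(const void *src, uint32_t row) {
    svbool_t pg = svptrue_b8();        /* from arm_sve.h, included above */
    svld1_hor_za8(0, row, 0, pg, src); /* tile 0, slice_base = row, offset 0 */
}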
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f16_m), arm_streaming, arm_shared_za))
void svmopa_za32_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_bf16_m), arm_streaming, arm_shared_za))
void svmopa_za32_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f32_m), arm_streaming, arm_shared_za))
void svmopa_za32_f32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_s8_m), arm_streaming, arm_shared_za))
void svmopa_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_u8_m), arm_streaming, arm_shared_za))
void svmopa_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f16_m), arm_streaming, arm_shared_za))
void svmops_za32_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_bf16_m), arm_streaming, arm_shared_za))
void svmops_za32_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f32_m), arm_streaming, arm_shared_za))
void svmops_za32_f32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_s8_m), arm_streaming, arm_shared_za))
void svmops_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_u8_m), arm_streaming, arm_shared_za))
void svmops_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t);
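svmopa and svmops are the widening outer-product accumulate and subtract operations: each call performs a predicated rank-1 update of a 32-bit ZA tile, with the f16, bf16, s8, and u8 variants widening into the 32-bit accumulators. A hedged sketch of the f32 form:

#include <arm_sme_draft_spec_subject_to_change.h>

/* ZA tile 0 (f32) += zn * zm^T under row predicate prow and column
   predicate pcol. */
__attribute__((arm_streaming, arm_shared_za))
static void rank1_update(svbool_t prow, svbool_t pcol,
                         svfloat32_t zn, svfloat32_t zm) {
    svmopa_za32_f32_m(0, prow, pcol, zn, zm);
}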
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint8_t svread_hor_za128_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint32_t svread_hor_za128_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint64_t svread_hor_za128_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint16_t svread_hor_za128_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svbfloat16_t svread_hor_za128_bf16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint8_t svread_hor_za128_s8_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat64_t svread_hor_za128_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat32_t svread_hor_za128_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat16_t svread_hor_za128_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint32_t svread_hor_za128_s32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint64_t svread_hor_za128_s64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint16_t svread_hor_za128_s16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint16_t svread_hor_za16_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svbfloat16_t svread_hor_za16_bf16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat16_t svread_hor_za16_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint16_t svread_hor_za16_s16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint32_t svread_hor_za32_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat32_t svread_hor_za32_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint32_t svread_hor_za32_s32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint64_t svread_hor_za64_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat64_t svread_hor_za64_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint64_t svread_hor_za64_s64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint8_t svread_hor_za8_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint8_t svread_hor_za8_s8_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint8_t svread_ver_za128_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint32_t svread_ver_za128_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint64_t svread_ver_za128_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint16_t svread_ver_za128_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svbfloat16_t svread_ver_za128_bf16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint8_t svread_ver_za128_s8_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat64_t svread_ver_za128_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat32_t svread_ver_za128_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat16_t svread_ver_za128_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint32_t svread_ver_za128_s32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint64_t svread_ver_za128_s64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint16_t svread_ver_za128_s16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint16_t svread_ver_za16_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svbfloat16_t svread_ver_za16_bf16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat16_t svread_ver_za16_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint16_t svread_ver_za16_s16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint32_t svread_ver_za32_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat32_t svread_ver_za32_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint32_t svread_ver_za32_s32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint64_t svread_ver_za64_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat64_t svread_ver_za64_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint64_t svread_ver_za64_s64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint8_t svread_ver_za8_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint8_t svread_ver_za8_s8_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
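The svread_* forms are merge-predicated moves out of ZA: lanes where the predicate is false keep the first (fallback) argument, which is why each takes a vector operand ahead of the predicate. A sketch, again reading the trailing arguments as (tile, slice_base, slice_offset):

#include <arm_sme_draft_spec_subject_to_change.h>

/* Read row `row` of 8-bit ZA tile 0; inactive lanes keep `fallback`. */
__attribute__((arm_streaming, arm_shared_za, arm_preserves_za))
static svuint8_t read_za8_row(svuint8_t fallback, svbool_t pg, uint32_t row) {
    return svread_hor_za8_u8_m(fallback, pg, 0, row, 0);
}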
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za128), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_hor_vnum_za128(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za16), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_hor_vnum_za16(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za32), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_hor_vnum_za32(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za64), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_hor_vnum_za64(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za8), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_hor_vnum_za8(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za128), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_hor_za128(uint64_t, uint32_t, uint64_t, svbool_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za16), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_hor_za16(uint64_t, uint32_t, uint64_t, svbool_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za32), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_hor_za32(uint64_t, uint32_t, uint64_t, svbool_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za64), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_hor_za64(uint64_t, uint32_t, uint64_t, svbool_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za8), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_hor_za8(uint64_t, uint32_t, uint64_t, svbool_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za128), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_ver_vnum_za128(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za16), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_ver_vnum_za16(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za32), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_ver_vnum_za32(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za64), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_ver_vnum_za64(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za8), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_ver_vnum_za8(uint64_t, uint32_t, uint64_t, svbool_t, void *, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za128), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_ver_za128(uint64_t, uint32_t, uint64_t, svbool_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za16), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_ver_za16(uint64_t, uint32_t, uint64_t, svbool_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za32), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_ver_za32(uint64_t, uint32_t, uint64_t, svbool_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za64), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_ver_za64(uint64_t, uint32_t, uint64_t, svbool_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za8), arm_streaming, arm_shared_za, arm_preserves_za))
void svst1_ver_za8(uint64_t, uint32_t, uint64_t, svbool_t, void *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za32_s8_m), arm_streaming, arm_shared_za))
void svsumopa_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za32_s8_m), arm_streaming, arm_shared_za))
void svsumops_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za32_u8_m), arm_streaming, arm_shared_za))
void svusmopa_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za32_u8_m), arm_streaming, arm_shared_za))
void svusmops_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u8_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_u8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_u32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_u64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_u16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_bf16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_bf16_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s8_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_s8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_f64_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_f32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_f16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_s32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_s64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_s16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za16_u16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_bf16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za16_bf16_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_f16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za16_f16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_s16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za16_s16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_u32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za32_u32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_f32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za32_f32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_s32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za32_s32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_u64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za64_u64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_f64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za64_f64_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_s64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za64_s64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_m), arm_streaming, arm_shared_za))
void svwrite_hor_za8_u8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_m), arm_streaming, arm_shared_za))
void svwrite_hor_za8_s8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u8_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_u8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_u32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_u64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_u16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_bf16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_bf16_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s8_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_s8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_f64_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_f32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_f16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_s32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_s64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_s16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za16_u16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za16_bf16_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_f16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za16_f16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_s16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za16_s16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_u32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za32_u32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_f32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za32_f32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_s32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za32_s32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_u64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za64_u64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_f64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za64_f64_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_s64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za64_s64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_m), arm_streaming, arm_shared_za))
void svwrite_ver_za8_u8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m), arm_streaming, arm_shared_za))
void svwrite_ver_za8_s8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_mask_za), arm_streaming_compatible, arm_shared_za))
void svzero_mask_za(uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za), arm_streaming_compatible, arm_shared_za))
void svzero_za(void);
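svzero_mask_za clears the subset of the eight 64-bit tiles ZA0.D through ZA7.D selected by its mask, while svzero_za clears the whole ZA array; both are usable from streaming and non-streaming code. Sketch:

#include <arm_sme_draft_spec_subject_to_change.h>

/* Reset all accumulator state before starting a new tile computation. */
__attribute__((arm_streaming_compatible, arm_shared_za))
static void reset_za(void) {
    svzero_za();
}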
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_u32_m), arm_streaming, arm_shared_za))
void svaddha_za32_m(uint64_t, svbool_t, svbool_t, svuint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_s32_m), arm_streaming, arm_shared_za))
void svaddha_za32_m(uint64_t, svbool_t, svbool_t, svint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_u32_m), arm_streaming, arm_shared_za))
void svaddva_za32_m(uint64_t, svbool_t, svbool_t, svuint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_s32_m), arm_streaming, arm_shared_za))
void svaddva_za32_m(uint64_t, svbool_t, svbool_t, svint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f16_m), arm_streaming, arm_shared_za))
void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_bf16_m), arm_streaming, arm_shared_za))
void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f32_m), arm_streaming, arm_shared_za))
void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_s8_m), arm_streaming, arm_shared_za))
void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_u8_m), arm_streaming, arm_shared_za))
void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f16_m), arm_streaming, arm_shared_za))
void svmops_za32_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_bf16_m), arm_streaming, arm_shared_za))
void svmops_za32_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f32_m), arm_streaming, arm_shared_za))
void svmops_za32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_s8_m), arm_streaming, arm_shared_za))
void svmops_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_u8_m), arm_streaming, arm_shared_za))
void svmops_za32_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t);
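Because the preceding aliases are overloadable, the type-generic spellings dispatch on argument type; for instance, this illustrative wrapper resolves to the f32 builtin:

#include <arm_sme_draft_spec_subject_to_change.h>

__attribute__((arm_streaming, arm_shared_za))
static void rank1_update_generic(svbool_t pg, svfloat32_t zn, svfloat32_t zm) {
    svmopa_za32_m(0, pg, pg, zn, zm); /* picks __builtin_sme_svmopa_za32_f32_m */
}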
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint8_t svread_hor_za128_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint32_t svread_hor_za128_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint64_t svread_hor_za128_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint16_t svread_hor_za128_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svbfloat16_t svread_hor_za128_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint8_t svread_hor_za128_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat64_t svread_hor_za128_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat32_t svread_hor_za128_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat16_t svread_hor_za128_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint32_t svread_hor_za128_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint64_t svread_hor_za128_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint16_t svread_hor_za128_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint16_t svread_hor_za16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svbfloat16_t svread_hor_za16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat16_t svread_hor_za16_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint16_t svread_hor_za16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint32_t svread_hor_za32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat32_t svread_hor_za32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint32_t svread_hor_za32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint64_t svread_hor_za64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat64_t svread_hor_za64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint64_t svread_hor_za64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint8_t svread_hor_za8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint8_t svread_hor_za8_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint8_t svread_ver_za128_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint32_t svread_ver_za128_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint64_t svread_ver_za128_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint16_t svread_ver_za128_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svbfloat16_t svread_ver_za128_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint8_t svread_ver_za128_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat64_t svread_ver_za128_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat32_t svread_ver_za128_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat16_t svread_ver_za128_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint32_t svread_ver_za128_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint64_t svread_ver_za128_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint16_t svread_ver_za128_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint16_t svread_ver_za16_m(svuint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_bf16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svbfloat16_t svread_ver_za16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_f16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat16_t svread_ver_za16_m(svfloat16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_s16_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint16_t svread_ver_za16_m(svint16_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_u32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint32_t svread_ver_za32_m(svuint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_f32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat32_t svread_ver_za32_m(svfloat32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_s32_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint32_t svread_ver_za32_m(svint32_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_u64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint64_t svread_ver_za64_m(svuint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_f64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svfloat64_t svread_ver_za64_m(svfloat64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_s64_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint64_t svread_ver_za64_m(svint64_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svuint8_t svread_ver_za8_m(svuint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_m), arm_streaming, arm_shared_za, arm_preserves_za))
svint8_t svread_ver_za8_m(svint8_t, svbool_t, uint64_t, uint32_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za32_s8_m), arm_streaming, arm_shared_za))
void svsumopa_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za32_s8_m), arm_streaming, arm_shared_za))
void svsumops_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za32_u8_m), arm_streaming, arm_shared_za))
void svusmopa_za32_m(uint64_t, svbool_t, svbool_t, svuint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za32_u8_m), arm_streaming, arm_shared_za))
void svusmops_za32_m(uint64_t, svbool_t, svbool_t, svuint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u8_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_bf16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s8_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_bf16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_f16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_s16_m), arm_streaming, arm_shared_za))
void svwrite_hor_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_u32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_f32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_s32_m), arm_streaming, arm_shared_za))
void svwrite_hor_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_u64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_f64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_s64_m), arm_streaming, arm_shared_za))
void svwrite_hor_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_m), arm_streaming, arm_shared_za))
void svwrite_hor_za8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_m), arm_streaming, arm_shared_za))
void svwrite_hor_za8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u8_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_bf16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s8_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za128_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svbfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_f16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_s16_m), arm_streaming, arm_shared_za))
void svwrite_ver_za16_m(uint64_t, uint32_t, uint64_t, svbool_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_u32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_f32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_s32_m), arm_streaming, arm_shared_za))
void svwrite_ver_za32_m(uint64_t, uint32_t, uint64_t, svbool_t, svint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_u64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_f64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_s64_m), arm_streaming, arm_shared_za))
void svwrite_ver_za64_m(uint64_t, uint32_t, uint64_t, svbool_t, svint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_m), arm_streaming, arm_shared_za))
void svwrite_ver_za8_m(uint64_t, uint32_t, uint64_t, svbool_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m), arm_streaming, arm_shared_za))
void svwrite_ver_za8_m(uint64_t, uint32_t, uint64_t, svbool_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_f64_m), arm_streaming, arm_shared_za))
void svmopa_za64_f64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_f64_m), arm_streaming, arm_shared_za))
void svmops_za64_f64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_f64_m), arm_streaming, arm_shared_za))
void svmopa_za64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_f64_m), arm_streaming, arm_shared_za))
void svmops_za64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t);
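The 64-bit ZA groups are optional SME extensions: the f64 outer products above require FEAT_SME_F64F64, and the 64-bit accumulator and 16-bit-integer widening forms below require FEAT_SME_I16I64; to my understanding the compiler rejects these intrinsics unless the matching target feature is enabled.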
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_u64_m), arm_streaming, arm_shared_za))
void svaddha_za64_u64_m(uint64_t, svbool_t, svbool_t, svuint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_s64_m), arm_streaming, arm_shared_za))
void svaddha_za64_s64_m(uint64_t, svbool_t, svbool_t, svint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_u64_m), arm_streaming, arm_shared_za))
void svaddva_za64_u64_m(uint64_t, svbool_t, svbool_t, svuint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_s64_m), arm_streaming, arm_shared_za))
void svaddva_za64_s64_m(uint64_t, svbool_t, svbool_t, svint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_s16_m), arm_streaming, arm_shared_za))
void svmopa_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_u16_m), arm_streaming, arm_shared_za))
void svmopa_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_s16_m), arm_streaming, arm_shared_za))
void svmops_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_u16_m), arm_streaming, arm_shared_za))
void svmops_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za64_s16_m), arm_streaming, arm_shared_za))
void svsumopa_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za64_s16_m), arm_streaming, arm_shared_za))
void svsumops_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za64_u16_m), arm_streaming, arm_shared_za))
void svusmopa_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za64_u16_m), arm_streaming, arm_shared_za))
void svusmops_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_u64_m), arm_streaming, arm_shared_za))
void svaddha_za64_m(uint64_t, svbool_t, svbool_t, svuint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_s64_m), arm_streaming, arm_shared_za))
void svaddha_za64_m(uint64_t, svbool_t, svbool_t, svint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_u64_m), arm_streaming, arm_shared_za))
void svaddva_za64_m(uint64_t, svbool_t, svbool_t, svuint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_s64_m), arm_streaming, arm_shared_za))
void svaddva_za64_m(uint64_t, svbool_t, svbool_t, svint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_s16_m), arm_streaming, arm_shared_za))
void svmopa_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_u16_m), arm_streaming, arm_shared_za))
void svmopa_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_s16_m), arm_streaming, arm_shared_za))
void svmops_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_u16_m), arm_streaming, arm_shared_za))
void svmops_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za64_s16_m), arm_streaming, arm_shared_za))
void svsumopa_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za64_s16_m), arm_streaming, arm_shared_za))
void svsumops_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za64_u16_m), arm_streaming, arm_shared_za))
void svusmopa_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za64_u16_m), arm_streaming, arm_shared_za))
void svusmops_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svldr_vnum_za), arm_streaming_compatible, arm_shared_za))
void svldr_vnum_za(uint32_t, uint64_t, void const *);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svstr_vnum_za), arm_streaming_compatible, arm_shared_za, arm_preserves_za))
void svstr_vnum_za(uint32_t, uint64_t, void *);
#ifdef __cplusplus
} // extern "C"
#endif
#undef __ai
#endif /* __ARM_SME_H */
+68 -13
View File
@@ -37,7 +37,6 @@ typedef __SVFloat16_t svfloat16_t;
typedef __SVBFloat16_t svbfloat16_t;
#include <arm_bf16.h>
typedef __bf16 bfloat16_t;
typedef __SVFloat32_t svfloat32_t;
typedef __SVFloat64_t svfloat64_t;
typedef __clang_svint8x2_t svint8x2_t;
@@ -74,10 +73,14 @@ typedef __clang_svfloat16x4_t svfloat16x4_t;
typedef __clang_svfloat32x4_t svfloat32x4_t;
typedef __clang_svfloat64x4_t svfloat64x4_t;
typedef __SVBool_t svbool_t;
typedef __clang_svboolx2_t svboolx2_t;
typedef __clang_svboolx4_t svboolx4_t;
typedef __clang_svbfloat16x2_t svbfloat16x2_t;
typedef __clang_svbfloat16x3_t svbfloat16x3_t;
typedef __clang_svbfloat16x4_t svbfloat16x4_t;
typedef __SVCount_t svcount_t;
enum svpattern
{
SV_POW2 = 0,
@@ -2914,6 +2917,10 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64)))
svint64_t svdup_lane_s64(svint64_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16)))
svint16_t svdup_lane_s16(svint16_t, uint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8)))
svuint8_t svdupq_n_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8)))
svint8_t svdupq_n_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16)))
svuint16_t svdupq_n_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16)))
@@ -2932,18 +2939,14 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64)))
svfloat64_t svdupq_n_f64(float64_t, float64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64)))
svint64_t svdupq_n_s64(int64_t, int64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8)))
svuint8_t svdupq_n_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8)))
svint8_t svdupq_n_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8)))
svbool_t svdupq_n_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16)))
svbool_t svdupq_n_b16(bool, bool, bool, bool, bool, bool, bool, bool);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32)))
svbool_t svdupq_n_b32(bool, bool, bool, bool);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64)))
svbool_t svdupq_n_b64(bool, bool);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8)))
svbool_t svdupq_n_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8)))
svuint8_t svdupq_lane_u8(svuint8_t, uint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32)))
@@ -10528,6 +10531,10 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64)))
svint64_t svdup_lane(svint64_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16)))
svint16_t svdup_lane(svint16_t, uint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8)))
svuint8_t svdupq_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8)))
svint8_t svdupq_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16)))
svuint16_t svdupq_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16)))
@@ -10546,18 +10553,14 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64)))
svfloat64_t svdupq_f64(float64_t, float64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64)))
svint64_t svdupq_s64(int64_t, int64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8)))
svuint8_t svdupq_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8)))
svint8_t svdupq_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8)))
svbool_t svdupq_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16)))
svbool_t svdupq_b16(bool, bool, bool, bool, bool, bool, bool, bool);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32)))
svbool_t svdupq_b32(bool, bool, bool, bool);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64)))
svbool_t svdupq_b64(bool, bool);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8)))
svbool_t svdupq_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8)))
svuint8_t svdupq_lane(svuint8_t, uint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32)))
@@ -23874,6 +23877,58 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32)))
svuint32_t svsm4e(svuint32_t, svuint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32)))
svuint32_t svsm4ekey(svuint32_t, svuint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f64)))
svfloat64_t svclamp_f64(svfloat64_t, svfloat64_t, svfloat64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32)))
svfloat32_t svclamp_f32(svfloat32_t, svfloat32_t, svfloat32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16)))
svfloat16_t svclamp_f16(svfloat16_t, svfloat16_t, svfloat16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8)))
svint8_t svclamp_s8(svint8_t, svint8_t, svint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32)))
svint32_t svclamp_s32(svint32_t, svint32_t, svint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64)))
svint64_t svclamp_s64(svint64_t, svint64_t, svint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16)))
svint16_t svclamp_s16(svint16_t, svint16_t, svint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8)))
svuint8_t svclamp_u8(svuint8_t, svuint8_t, svuint8_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32)))
svuint32_t svclamp_u32(svuint32_t, svuint32_t, svuint32_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64)))
svuint64_t svclamp_u64(svuint64_t, svuint64_t, svuint64_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16)))
svuint16_t svclamp_u16(svuint16_t, svuint16_t, svuint16_t);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c8)))
svcount_t svptrue_c8(void);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c32)))
svcount_t svptrue_c32(void);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c64)))
svcount_t svptrue_c64(void);
__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c16)))
svcount_t svptrue_c16(void);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f64)))
svfloat64_t svclamp(svfloat64_t, svfloat64_t, svfloat64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32)))
svfloat32_t svclamp(svfloat32_t, svfloat32_t, svfloat32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16)))
svfloat16_t svclamp(svfloat16_t, svfloat16_t, svfloat16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8)))
svint8_t svclamp(svint8_t, svint8_t, svint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32)))
svint32_t svclamp(svint32_t, svint32_t, svint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64)))
svint64_t svclamp(svint64_t, svint64_t, svint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16)))
svint16_t svclamp(svint16_t, svint16_t, svint16_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8)))
svuint8_t svclamp(svuint8_t, svuint8_t, svuint8_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32)))
svuint32_t svclamp(svuint32_t, svuint32_t, svuint32_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64)))
svuint64_t svclamp(svuint64_t, svuint64_t, svuint64_t);
__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16)))
svuint16_t svclamp(svuint16_t, svuint16_t, svuint16_t);
#define svcvtnt_bf16_x svcvtnt_bf16_m
#define svcvtnt_bf16_f32_x svcvtnt_bf16_f32_m
#define svcvtnt_f16_x svcvtnt_f16_m
+4116 -1
View File
@@ -19,128 +19,539 @@
#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128)))
/* SSE4 Multiple Packed Sums of Absolute Difference. */
/// Computes sixteen sum of absolute difference (SAD) operations on sets of
/// four unsigned 8-bit integers from the 256-bit integer vectors \a X and
/// \a Y.
///
/// Eight SAD results are computed using the lower half of the input
/// vectors, and another eight using the upper half. These 16-bit values
/// are returned in the lower and upper halves of the 256-bit result,
/// respectively.
///
/// A single SAD operation selects four bytes from \a X and four bytes from
/// \a Y as input. It computes the differences between each \a X byte and
/// the corresponding \a Y byte, takes the absolute value of each
/// difference, and sums these four values to form one 16-bit result. The
/// intrinsic computes 16 of these results with different sets of input
/// bytes.
///
/// For each set of eight results, the SAD operations use the same four
/// bytes from \a Y; the starting bit position for these four bytes is
/// specified by \a M[1:0] times 32. The eight operations use successive
/// sets of four bytes from \a X; the starting bit position for the first
/// set of four bytes is specified by \a M[2] times 32. These bit positions
/// are all relative to the 128-bit lane for each set of eight operations.
///
/// \code{.operation}
/// r := 0
/// FOR i := 0 TO 1
/// j := i*3
/// Ybase := M[j+1:j]*32 + i*128
/// Xbase := M[j+2]*32 + i*128
/// FOR k := 0 TO 3
/// temp0 := ABS(X[Xbase+7:Xbase] - Y[Ybase+7:Ybase])
/// temp1 := ABS(X[Xbase+15:Xbase+8] - Y[Ybase+15:Ybase+8])
/// temp2 := ABS(X[Xbase+23:Xbase+16] - Y[Ybase+23:Ybase+16])
/// temp3 := ABS(X[Xbase+31:Xbase+24] - Y[Ybase+31:Ybase+24])
/// result[r+15:r] := temp0 + temp1 + temp2 + temp3
/// Xbase := Xbase + 8
/// r := r + 16
/// ENDFOR
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_mpsadbw_epu8(__m256i X, __m256i Y, const int M);
/// \endcode
///
/// This intrinsic corresponds to the \c VMPSADBW instruction.
///
/// \param X
/// A 256-bit integer vector containing one of the inputs.
/// \param Y
/// A 256-bit integer vector containing one of the inputs.
/// \param M
/// An unsigned immediate value specifying the starting positions of the
/// bytes to operate on.
/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_mpsadbw_epu8(X, Y, M) \
((__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
(__v32qi)(__m256i)(Y), (int)(M)))
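/* Editor's sketch (not part of the upstream header): a minimal use of the
   macro above, assuming a translation unit that includes <immintrin.h> and
   is compiled with -mavx2. The immediate must be a compile-time constant;
   M = 0 selects bytes 0..3 of Y and starts the sliding X window at byte 0
   of each 128-bit lane. */
static inline __m256i example_mpsadbw(__m256i X, __m256i Y)
{
  return _mm256_mpsadbw_epu8(X, Y, 0);
}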
/// Computes the absolute value of each signed byte in the 256-bit integer
/// vector \a __a and returns each value in the corresponding byte of
/// the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPABSB instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi8(__m256i __a)
{
return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
}
/// Computes the absolute value of each signed 16-bit element in the 256-bit
/// vector of [16 x i16] in \a __a and returns each value in the
/// corresponding element of the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPABSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16].
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi16(__m256i __a)
{
return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
}
/// Computes the absolute value of each signed 32-bit element in the 256-bit
/// vector of [8 x i32] in \a __a and returns each value in the
/// corresponding element of the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPABSD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32].
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi32(__m256i __a)
{
return (__m256i)__builtin_elementwise_abs((__v8si)__a);
}
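/* Editor's sketch (not part of the upstream header): per-element absolute
   value. One caveat worth noting: the most negative value maps to itself
   (for bytes, -128 stays -128) because the result wraps. */
static inline __m256i example_abs(void)
{
  __m256i v = _mm256_set1_epi8(-5);
  return _mm256_abs_epi8(v); /* every byte becomes 5 */
}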
/// Converts the elements of two 256-bit vectors of [16 x i16] to 8-bit
/// integers using signed saturation, and returns the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*16
/// k := i*8
/// result[7+k:k] := SATURATE8(__a[15+j:j])
/// result[71+k:64+k] := SATURATE8(__b[15+j:j])
/// result[135+k:128+k] := SATURATE8(__a[143+j:128+j])
/// result[199+k:192+k] := SATURATE8(__b[143+j:128+j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPACKSSWB instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] used to generate result[63:0] and
/// result[191:128].
/// \param __b
/// A 256-bit vector of [16 x i16] used to generate result[127:64] and
/// result[255:192].
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
}
/// Converts the elements of two 256-bit vectors of [8 x i32] to 16-bit
/// integers using signed saturation, and returns the resulting 256-bit
/// vector of [16 x i16].
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*32
/// k := i*16
/// result[15+k:k] := SATURATE16(__a[31+j:j])
/// result[79+k:64+k] := SATURATE16(__b[31+j:j])
/// result[143+k:128+k] := SATURATE16(__a[159+j:128+j])
/// result[207+k:192+k] := SATURATE16(__b[159+j:128+j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPACKSSDW instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] used to generate result[63:0] and
/// result[191:128].
/// \param __b
/// A 256-bit vector of [8 x i32] used to generate result[127:64] and
/// result[255:192].
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packs_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
}
/// Converts elements from two 256-bit vectors of [16 x i16] to 8-bit integers
/// using unsigned saturation, and returns the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*16
/// k := i*8
/// result[7+k:k] := SATURATE8U(__a[15+j:j])
/// result[71+k:64+k] := SATURATE8U(__b[15+j:j])
/// result[135+k:128+k] := SATURATE8U(__a[143+j:128+j])
/// result[199+k:192+k] := SATURATE8U(__b[143+j:128+j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPACKUSWB instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] used to generate result[63:0] and
/// result[191:128].
/// \param __b
/// A 256-bit vector of [16 x i16] used to generate result[127:64] and
/// result[255:192].
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packus_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
}
/// Converts elements from two 256-bit vectors of [8 x i32] to 16-bit integers
/// using unsigned saturation, and returns the resulting 256-bit vector of
/// [16 x i16].
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*32
/// k := i*16
/// result[15+k:k] := SATURATE16U(__V1[31+j:j])
/// result[79+k:64+k] := SATURATE16U(__V2[31+j:j])
/// result[143+k:128+k] := SATURATE16U(__V1[159+j:128+j])
/// result[207+k:192+k] := SATURATE16U(__V2[159+j:128+j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPACKUSDW instruction.
///
/// \param __V1
/// A 256-bit vector of [8 x i32] used to generate result[63:0] and
/// result[191:128].
/// \param __V2
/// A 256-bit vector of [8 x i32] used to generate result[127:64] and
/// result[255:192].
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packus_epi32(__m256i __V1, __m256i __V2)
{
return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
}
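/* Editor's sketch (not part of the upstream header): narrowing with
   saturation. 300 does not fit in 8 bits, so the signed pack clamps it to
   127 and the unsigned pack to 255; -300 clamps to -128 and 0 respectively.
   Remember the interleaving: __a fills bits [63:0] and [191:128]. */
static inline void example_pack(__m256i *signed_out, __m256i *unsigned_out)
{
  __m256i a = _mm256_set1_epi16(300);
  __m256i b = _mm256_set1_epi16(-300);
  *signed_out   = _mm256_packs_epi16(a, b);
  *unsigned_out = _mm256_packus_epi16(a, b);
}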
/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
/// vectors and returns the lower 8 bits of each sum in the corresponding
/// byte of the 256-bit integer vector result (overflow is ignored).
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPADDB instruction.
///
/// \param __a
/// A 256-bit integer vector containing one of the source operands.
/// \param __b
/// A 256-bit integer vector containing one of the source operands.
/// \returns A 256-bit integer vector containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi8(__m256i __a, __m256i __b)
{
return (__m256i)((__v32qu)__a + (__v32qu)__b);
}
/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
/// [16 x i16] and returns the lower 16 bits of each sum in the
/// corresponding element of the [16 x i16] result (overflow is ignored).
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPADDW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a + (__v16hu)__b);
}
/// Adds 32-bit integers from corresponding elements of two 256-bit vectors of
/// [8 x i32] and returns the lower 32 bits of each sum in the corresponding
/// element of the [8 x i32] result (overflow is ignored).
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPADDD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \returns A 256-bit vector of [8 x i32] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8su)__a + (__v8su)__b);
}
/// Adds 64-bit integers from corresponding elements of two 256-bit vectors of
/// [4 x i64] and returns the lower 64 bits of each sum in the corresponding
/// element of the [4 x i64] result (overflow is ignored).
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPADDQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [4 x i64] containing one of the source operands.
/// \returns A 256-bit vector of [4 x i64] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a + (__v4du)__b);
}
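/* Editor's sketch (not part of the upstream header): these adds wrap on
   overflow, which the saturating forms below avoid. */
static inline __m256i example_add_wrap(void)
{
  __m256i a = _mm256_set1_epi8(127);
  __m256i b = _mm256_set1_epi8(1);
  return _mm256_add_epi8(a, b); /* 127 + 1 wraps to -128 in every byte */
}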
/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
/// vectors using signed saturation, and returns each sum in the
/// corresponding byte of the 256-bit integer vector result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPADDSB instruction.
///
/// \param __a
/// A 256-bit integer vector containing one of the source operands.
/// \param __b
/// A 256-bit integer vector containing one of the source operands.
/// \returns A 256-bit integer vector containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b);
}
/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
/// [16 x i16] using signed saturation, and returns the [16 x i16] result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPADDSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b);
}
/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
/// vectors using unsigned saturation, and returns each sum in the
/// corresponding byte of the 256-bit integer vector result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPADDUSB instruction.
///
/// \param __a
/// A 256-bit integer vector containing one of the source operands.
/// \param __b
/// A 256-bit integer vector containing one of the source operands.
/// \returns A 256-bit integer vector containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b);
}
/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
/// [16 x i16] using unsigned saturation, and returns the [16 x i16] result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPADDUSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b);
}
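/* Editor's sketch (not part of the upstream header): the saturating
   counterpart of the wrapping example above. */
static inline __m256i example_add_sat(void)
{
  __m256i a = _mm256_set1_epi8(127);
  __m256i b = _mm256_set1_epi8(1);
  return _mm256_adds_epi8(a, b); /* clamps at 127 in every byte */
}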
/// Uses the lower half of the 256-bit vector \a a as the upper half of a
/// temporary 256-bit value, and the lower half of the 256-bit vector \a b
/// as the lower half of the temporary value. Right-shifts the temporary
/// value by \a n bytes, and uses the lower 16 bytes of the shifted value
/// as the lower 16 bytes of the result. Uses the upper halves of \a a and
/// \a b to make another temporary value, right shifts by \a n, and uses
/// the lower 16 bytes of the shifted value as the upper 16 bytes of the
/// result.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_alignr_epi8(__m256i a, __m256i b, const int n);
/// \endcode
///
/// This intrinsic corresponds to the \c VPALIGNR instruction.
///
/// \param a
/// A 256-bit integer vector containing source values.
/// \param b
/// A 256-bit integer vector containing source values.
/// \param n
/// An immediate value specifying the number of bytes to shift.
/// \returns A 256-bit integer vector containing the result.
#define _mm256_alignr_epi8(a, b, n) \
((__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
(__v32qi)(__m256i)(b), (n)))
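/* Editor's sketch (not part of the upstream header): note the shift happens
   within each 128-bit lane, not across the full 256 bits. With n = 4, each
   result lane holds bytes 4..15 of the b lane followed by bytes 0..3 of the
   a lane. */
static inline __m256i example_alignr(__m256i a, __m256i b)
{
  return _mm256_alignr_epi8(a, b, 4); /* n must be a constant expression */
}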
/// Computes the bitwise AND of the 256-bit integer vectors in \a __a and
/// \a __b.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPAND instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_and_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a & (__v4du)__b);
}
/// Computes the bitwise AND of the 256-bit integer vector in \a __b with
/// the bitwise NOT of the 256-bit integer vector in \a __a.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPANDN instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_andnot_si256(__m256i __a, __m256i __b)
{
return (__m256i)(~(__v4du)__a & (__v4du)__b);
}
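/* Editor's sketch (not part of the upstream header): the NOT applies to the
   *first* operand of andnot, a frequent source of bugs. Splitting a value
   by a mask and OR-ing the halves back together reconstructs the input. */
static inline __m256i example_mask_split(__m256i v, __m256i mask)
{
  __m256i kept    = _mm256_and_si256(v, mask);
  __m256i dropped = _mm256_andnot_si256(mask, v); /* (~mask) & v */
  return _mm256_or_si256(kept, dropped);          /* equals v again */
}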
/// Computes the averages of the corresponding unsigned bytes in the two
/// 256-bit integer vectors in \a __a and \a __b and returns each
/// average in the corresponding byte of the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 31
/// j := i*8
/// result[j+7:j] := (__a[j+7:j] + __b[j+7:j] + 1) >> 1
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPAVGB instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_avg_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);
}
/// Computes the averages of the corresponding unsigned 16-bit integers in
/// the two 256-bit vectors of [16 x i16] in \a __a and \a __b and returns
/// each average in the corresponding element of the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*16
/// result[j+15:j] := (__a[j+15:j] + __b[j+15:j] + 1) >> 1
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPAVGW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16].
/// \param __b
/// A 256-bit vector of [16 x i16].
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_avg_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);
}
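/* Editor's sketch (not part of the upstream header): the "+ 1" in the
   pseudocode means the average rounds up on ties. */
static inline __m256i example_avg(void)
{
  __m256i a = _mm256_set1_epi8(1);
  __m256i b = _mm256_set1_epi8(2);
  return _mm256_avg_epu8(a, b); /* (1 + 2 + 1) >> 1 = 2 in every byte */
}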
/// Merges 8-bit integer values from either of the two 256-bit vectors
/// \a __V1 or \a __V2, as specified by the 256-bit mask \a __M and returns
/// the resulting 256-bit integer vector.
///
/// \code{.operation}
/// FOR i := 0 TO 31
/// j := i*8
/// IF __M[7+j] == 0
/// result[7+j:j] := __V1[7+j:j]
/// ELSE
/// result[7+j:j] := __V2[7+j:j]
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPBLENDVB instruction.
///
/// \param __V1
/// A 256-bit integer vector containing source values.
/// \param __V2
/// A 256-bit integer vector containing source values.
/// \param __M
/// A 256-bit integer vector, with bit [7] of each byte specifying the
/// source for each corresponding byte of the result. When the mask bit
/// is 0, the byte is copied from \a __V1; otherwise, it is copied from
/// \a __V2.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
{
@@ -148,34 +559,171 @@ _mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
(__v32qi)__M);
}
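/* Editor's sketch (not part of the upstream header): a mask from a compare
   feeds blendv directly, since compares set all bits (including bit 7) of
   each matching byte. This computes a per-byte signed minimum, using the
   signed compare defined further down in this header. */
static inline __m256i example_min_via_blendv(__m256i v, __m256i limit)
{
  __m256i gt = _mm256_cmpgt_epi8(v, limit); /* 0xFF where v > limit */
  return _mm256_blendv_epi8(v, limit, gt);  /* take limit there */
}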
/// Merges 16-bit integer values from either of the two 256-bit vectors
/// \a V1 or \a V2, as specified by the immediate integer operand \a M,
/// and returns the resulting 256-bit vector of [16 x i16].
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*16
/// IF M[i] == 0
/// result[15+j:j] := V1[15+j:j]
/// result[143+j:128+j] := V1[143+j:128+j]
/// ELSE
/// result[15+j:j] := V2[15+j:j]
/// result[143+j:128+j] := V2[143+j:128+j]
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_blend_epi16(__m256i V1, __m256i V2, const int M);
/// \endcode
///
/// This intrinsic corresponds to the \c VPBLENDW instruction.
///
/// \param V1
/// A 256-bit vector of [16 x i16] containing source values.
/// \param V2
/// A 256-bit vector of [16 x i16] containing source values.
/// \param M
/// An immediate 8-bit integer operand, with bits [7:0] specifying the
/// source for each element of the result. The position of the mask bit
/// corresponds to the index of a copied value. When a mask bit is 0, the
/// element is copied from \a V1; otherwise, it is copied from \a V2.
/// \a M[0] determines the source for elements 0 and 8, \a M[1] for
/// elements 1 and 9, and so forth.
/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_blend_epi16(V1, V2, M) \
((__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \
(__v16hi)(__m256i)(V2), (int)(M)))
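/* Editor's sketch (not part of the upstream header): because the 8 mask
   bits cover 16 elements, each bit picks a pair. M = 0x01 takes elements 0
   and 8 from V2 and everything else from V1. */
static inline __m256i example_blend(__m256i V1, __m256i V2)
{
  return _mm256_blend_epi16(V1, V2, 0x01);
}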
/// Compares corresponding bytes in the 256-bit integer vectors in \a __a and
/// \a __b for equality and returns the outcomes in the corresponding
/// bytes of the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 31
/// j := i*8
/// result[j+7:j] := (__a[j+7:j] == __b[j+7:j]) ? 0xFF : 0
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPCMPEQB instruction.
///
/// \param __a
/// A 256-bit integer vector containing one of the inputs.
/// \param __b
/// A 256-bit integer vector containing one of the inputs.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
{
return (__m256i)((__v32qi)__a == (__v32qi)__b);
}
/// Compares corresponding elements in the 256-bit vectors of [16 x i16] in
/// \a __a and \a __b for equality and returns the outcomes in the
/// corresponding elements of the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*16
/// result[j+15:j] := (__a[j+15:j] == __b[j+15:j]) ? 0xFFFF : 0
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPCMPEQW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the inputs.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the inputs.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hi)__a == (__v16hi)__b);
}
/// Compares corresponding elements in the 256-bit vectors of [8 x i32] in
/// \a __a and \a __b for equality and returns the outcomes in the
/// corresponding elements of the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*32
/// result[j+31:j] := (__a[j+31:j] == __b[j+31:j]) ? 0xFFFFFFFF : 0
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPCMPEQD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] containing one of the inputs.
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the inputs.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8si)__a == (__v8si)__b);
}
/// Compares corresponding elements in the 256-bit vectors of [4 x i64] in
/// \a __a and \a __b for equality and returns the outcomes in the
/// corresponding elements of the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*64
/// result[j+63:j] := (__a[j+63:j] == __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPCMPEQQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] containing one of the inputs.
/// \param __b
/// A 256-bit vector of [4 x i64] containing one of the inputs.
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4di)__a == (__v4di)__b);
}
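/* Editor's sketch (not part of the upstream header): an all-bytes-equal
   test, using _mm256_movemask_epi8 (defined further down in this header)
   to collapse the 32 comparison bytes into one integer. */
static inline int example_all_equal(__m256i a, __m256i b)
{
  __m256i eq = _mm256_cmpeq_epi8(a, b);
  return _mm256_movemask_epi8(eq) == -1; /* all 32 sign bits set */
}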
/// Compares corresponding signed bytes in the 256-bit integer vectors in
/// \a __a and \a __b for greater-than and returns the outcomes in the
/// corresponding bytes of the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 31
/// j := i*8
/// result[j+7:j] := (__a[j+7:j] > __b[j+7:j]) ? 0xFF : 0
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPCMPGTB instruction.
///
/// \param __a
/// A 256-bit integer vector containing one of the inputs.
/// \param __b
/// A 256-bit integer vector containing one of the inputs.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
{
@@ -184,138 +732,575 @@ _mm256_cmpgt_epi8(__m256i __a, __m256i __b)
return (__m256i)((__v32qs)__a > (__v32qs)__b);
}
/// Compares corresponding signed elements in the 256-bit vectors of
/// [16 x i16] in \a __a and \a __b for greater-than and returns the
/// outcomes in the corresponding elements of the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*16
/// result[j+15:j] := (__a[j+15:j] > __b[j+15:j]) ? 0xFFFF : 0
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPCMPGTW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the inputs.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the inputs.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hi)__a > (__v16hi)__b);
}
/// Compares corresponding signed elements in the 256-bit vectors of
/// [8 x i32] in \a __a and \a __b for greater-than and returns the
/// outcomes in the corresponding elements of the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*32
/// result[j+31:j] := (__a[j+31:j] > __b[j+31:j]) ? 0xFFFFFFFF : 0
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPCMPGTD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] containing one of the inputs.
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the inputs.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8si)__a > (__v8si)__b);
}
/// Compares corresponding signed elements in the 256-bit vectors of
/// [4 x i64] in \a __a and \a __b for greater-than and returns the
/// outcomes in the corresponding elements of the 256-bit result.
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*64
/// result[j+63:j] := (__a[j+63:j] > __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPCMPGTQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] containing one of the inputs.
/// \param __b
/// A 256-bit vector of [4 x i64] containing one of the inputs.
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4di)__a > (__v4di)__b);
}
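/* Editor's sketch (not part of the upstream header): the compares above are
   signed only. A common idiom for an unsigned byte compare is to flip the
   sign bit of both operands first. */
static inline __m256i example_cmpgt_epu8(__m256i a, __m256i b)
{
  const __m256i bias = _mm256_set1_epi8((char)0x80);
  return _mm256_cmpgt_epi8(_mm256_xor_si256(a, bias),
                           _mm256_xor_si256(b, bias));
}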
/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit
/// vectors of [16 x i16] and returns the lower 16 bits of each sum in an
/// element of the [16 x i16] result (overflow is ignored). Sums from
/// \a __a are returned in the lower 64 bits of each 128-bit half of the
/// result; sums from \a __b are returned in the upper 64 bits of each
/// 128-bit half of the result.
///
/// \code{.operation}
/// FOR i := 0 TO 1
/// j := i*128
/// result[j+15:j] := __a[j+15:j] + __a[j+31:j+16]
/// result[j+31:j+16] := __a[j+47:j+32] + __a[j+63:j+48]
/// result[j+47:j+32] := __a[j+79:j+64] + __a[j+95:j+80]
/// result[j+63:j+48] := __a[j+111:j+96] + __a[j+127:j+112]
/// result[j+79:j+64] := __b[j+15:j] + __b[j+31:j+16]
/// result[j+95:j+80] := __b[j+47:j+32] + __b[j+63:j+48]
/// result[j+111:j+96] := __b[j+79:j+64] + __b[j+95:j+80]
/// result[j+127:j+112] := __b[j+111:j+96] + __b[j+127:j+112]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPHADDW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadd_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
}
/// Horizontally adds the adjacent pairs of 32-bit integers from two 256-bit
/// vectors of [8 x i32] and returns the lower 32 bits of each sum in an
/// element of the [8 x i32] result (overflow is ignored). Sums from \a __a
/// are returned in the lower 64 bits of each 128-bit half of the result;
/// sums from \a __b are returned in the upper 64 bits of each 128-bit half
/// of the result.
///
/// \code{.operation}
/// FOR i := 0 TO 1
/// j := i*128
/// result[j+31:j] := __a[j+31:j] + __a[j+63:j+32]
/// result[j+63:j+32] := __a[j+95:j+64] + __a[j+127:j+96]
/// result[j+95:j+64] := __b[j+31:j] + __b[j+63:j+32]
/// result[j+127:j+96] := __b[j+95:j+64] + __b[j+127:j+96]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPHADDD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \returns A 256-bit vector of [8 x i32] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadd_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
}
/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit
/// vectors of [16 x i16] using signed saturation and returns each sum in
/// an element of the [16 x i16] result. Sums from \a __a are returned in
/// the lower 64 bits of each 128-bit half of the result; sums from \a __b
/// are returned in the upper 64 bits of each 128-bit half of the result.
///
/// \code{.operation}
/// FOR i := 0 TO 1
/// j := i*128
/// result[j+15:j] := SATURATE16(__a[j+15:j] + __a[j+31:j+16])
/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] + __a[j+63:j+48])
/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] + __a[j+95:j+80])
/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] + __a[j+127:j+112])
/// result[j+79:j+64] := SATURATE16(__b[j+15:j] + __b[j+31:j+16])
/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] + __b[j+63:j+48])
/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] + __b[j+95:j+80])
/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] + __b[j+127:j+112])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPHADDSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadds_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
}
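/* Editor's sketch (not part of the upstream header): reducing 8 x i32 to a
   single sum. Two hadds finish each 128-bit half; the halves are then
   combined with SSE ops, since hadd never crosses the lane boundary. */
static inline int example_sum8(__m256i v)
{
  __m256i s  = _mm256_hadd_epi32(v, v); /* pairs within each lane */
  s          = _mm256_hadd_epi32(s, s); /* quads within each lane */
  __m128i lo = _mm256_castsi256_si128(s);
  __m128i hi = _mm256_extracti128_si256(s, 1);
  return _mm_cvtsi128_si32(_mm_add_epi32(lo, hi));
}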
/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit
/// vectors of [16 x i16] and returns the lower 16 bits of each difference
/// in an element of the [16 x i16] result (overflow is ignored).
/// Differences from \a __a are returned in the lower 64 bits of each
/// 128-bit half of the result; differences from \a __b are returned in the
/// upper 64 bits of each 128-bit half of the result.
///
/// \code{.operation}
/// FOR i := 0 TO 1
/// j := i*128
/// result[j+15:j] := __a[j+15:j] - __a[j+31:j+16]
/// result[j+31:j+16] := __a[j+47:j+32] - __a[j+63:j+48]
/// result[j+47:j+32] := __a[j+79:j+64] - __a[j+95:j+80]
/// result[j+63:j+48] := __a[j+111:j+96] - __a[j+127:j+112]
/// result[j+79:j+64] := __b[j+15:j] - __b[j+31:j+16]
/// result[j+95:j+80] := __b[j+47:j+32] - __b[j+63:j+48]
/// result[j+111:j+96] := __b[j+79:j+64] - __b[j+95:j+80]
/// result[j+127:j+112] := __b[j+111:j+96] - __b[j+127:j+112]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPHSUBW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsub_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
}
/// Horizontally subtracts adjacent pairs of 32-bit integers from two 256-bit
/// vectors of [8 x i32] and returns the lower 32 bits of each difference in
/// an element of the [8 x i32] result (overflow is ignored). Differences
/// from \a __a are returned in the lower 64 bits of each 128-bit half of
/// the result; differences from \a __b are returned in the upper 64 bits
/// of each 128-bit half of the result.
///
/// \code{.operation}
/// FOR i := 0 TO 1
/// j := i*128
/// result[j+31:j] := __a[j+31:j] - __a[j+63:j+32]
/// result[j+63:j+32] := __a[j+95:j+64] - __a[j+127:j+96]
/// result[j+95:j+64] := __b[j+31:j] - __b[j+63:j+32]
/// result[j+127:j+96] := __b[j+95:j+64] - __b[j+127:j+96]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPHSUBD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \returns A 256-bit vector of [8 x i32] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsub_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
}
/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit
/// vectors of [16 x i16] using signed saturation and returns each difference
/// in an element of the [16 x i16] result. Differences from \a __a are
/// returned in the lower 64 bits of each 128-bit half of the result;
/// differences from \a __b are returned in the upper 64 bits of each
/// 128-bit half of the result.
///
/// \code{.operation}
/// FOR i := 0 TO 1
/// j := i*128
/// result[j+15:j] := SATURATE16(__a[j+15:j] - __a[j+31:j+16])
/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] - __a[j+63:j+48])
/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] - __a[j+95:j+80])
/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] - __a[j+127:j+112])
/// result[j+79:j+64] := SATURATE16(__b[j+15:j] - __b[j+31:j+16])
/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] - __b[j+63:j+48])
/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] - __b[j+95:j+80])
/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] - __b[j+127:j+112])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPHSUBSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsubs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
}
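/* Editor's sketch (not part of the upstream header): a case where the
   saturating subtraction matters. Each 32-bit constant 0x7FFF8000 holds the
   16-bit pair (low = -32768, high = 32767); their difference, -65535,
   clamps to -32768 instead of wrapping. */
static inline __m256i example_hsubs(void)
{
  __m256i pairs = _mm256_set1_epi32(0x7FFF8000);
  return _mm256_hsubs_epi16(pairs, pairs);
}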
/// Multiplies each unsigned byte from the 256-bit integer vector in \a __a
/// with the corresponding signed byte from the 256-bit integer vector in
/// \a __b, forming signed 16-bit intermediate products. Adds adjacent
/// pairs of those products using signed saturation to form 16-bit sums
/// returned as elements of the [16 x i16] result.
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*16
/// temp1 := __a[j+7:j] * __b[j+7:j]
/// temp2 := __a[j+15:j+8] * __b[j+15:j+8]
/// result[j+15:j] := SATURATE16(temp1 + temp2)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMADDUBSW instruction.
///
/// \param __a
/// A 256-bit vector containing one of the source operands.
/// \param __b
/// A 256-bit vector containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maddubs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);
}
/// Multiplies corresponding 16-bit elements of two 256-bit vectors of
/// [16 x i16], forming 32-bit intermediate products, and adds pairs of
/// those products to form 32-bit sums returned as elements of the
/// [8 x i32] result.
///
/// There is only one wraparound case: when all four of the 16-bit sources
/// are \c 0x8000, the result will be \c 0x80000000.
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*32
/// temp1 := __a[j+15:j] * __b[j+15:j]
/// temp2 := __a[j+31:j+16] * __b[j+31:j+16]
/// result[j+31:j] := temp1 + temp2
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMADDWD instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_madd_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);
}
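/* Editor's sketch (not part of the upstream header): madd is the classic
   building block for integer dot products (maddubs above is its mixed
   u8 x s8 form). Each i32 accumulator gains one two-term dot product. */
static inline __m256i example_dot_step(__m256i a16, __m256i b16, __m256i acc)
{
  return _mm256_add_epi32(acc, _mm256_madd_epi16(a16, b16));
}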
/// Compares the corresponding signed bytes in the two 256-bit integer vectors
/// in \a __a and \a __b and returns the larger of each pair in the
/// corresponding byte of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMAXSB instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b);
}
/// Compares the corresponding signed 16-bit integers in the two 256-bit
/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of
/// each pair in the corresponding element of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMAXSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16].
/// \param __b
/// A 256-bit vector of [16 x i16].
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b);
}
/// Compares the corresponding signed 32-bit integers in the two 256-bit
/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of
/// each pair in the corresponding element of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMAXSD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32].
/// \param __b
/// A 256-bit vector of [8 x i32].
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b);
}
/// Compares the corresponding unsigned bytes in the two 256-bit integer
/// vectors in \a __a and \a __b and returns the larger of each pair in
/// the corresponding byte of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMAXUB instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b);
}
/// Compares the corresponding unsigned 16-bit integers in the two 256-bit
/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of
/// each pair in the corresponding element of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMAXUW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16].
/// \param __b
/// A 256-bit vector of [16 x i16].
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b);
}
/// Compares the corresponding unsigned 32-bit integers in the two 256-bit
/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of
/// each pair in the corresponding element of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMAXUD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32].
/// \param __b
/// A 256-bit vector of [8 x i32].
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b);
}
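/* Editor's sketch (not part of the upstream header): a max/min pair (the
   min forms follow below) clamps each element into [lo, hi]. */
static inline __m256i example_clamp_epi32(__m256i v, __m256i lo, __m256i hi)
{
  return _mm256_min_epi32(_mm256_max_epi32(v, lo), hi);
}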
/// Compares the corresponding signed bytes in the two 256-bit integer vectors
/// in \a __a and \a __b and returns the smaller of each pair in the
/// corresponding byte of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMINSB instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b);
}
/// Compares the corresponding signed 16-bit integers in the two 256-bit
/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of
/// each pair in the corresponding element of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMINSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16].
/// \param __b
/// A 256-bit vector of [16 x i16].
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b);
}
/// Compares the corresponding signed 32-bit integers in the two 256-bit
/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of
/// each pair in the corresponding element of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMINSD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32].
/// \param __b
/// A 256-bit vector of [8 x i32].
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b);
}
/// Compares the corresponding unsigned bytes in the two 256-bit integer
/// vectors in \a __a and \a __b and returns the smaller of each pair in
/// the corresponding byte of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMINUB instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b);
}
/// Compares the corresponding unsigned 16-bit integers in the two 256-bit
/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of
/// each pair in the corresponding element of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMINUW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16].
/// \param __b
/// A 256-bit vector of [16 x i16].
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b);
}
/// Compares the corresponding unsigned 32-bit integers in the two 256-bit
/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of
/// each pair in the corresponding element of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMINUD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32].
/// \param __b
/// A 256-bit vector of [8 x i32].
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu32(__m256i __a, __m256i __b)
{
@@ -328,6 +1313,26 @@ _mm256_movemask_epi8(__m256i __a)
return __builtin_ia32_pmovmskb256((__v32qi)__a);
}
/// Sign-extends bytes from the 128-bit integer vector in \a __V and returns
/// the 16-bit values in the corresponding elements of a 256-bit vector
/// of [16 x i16].
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*8
/// k := i*16
/// result[k+15:k] := SignExtend(__V[j+7:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVSXBW instruction.
///
/// \param __V
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [16 x i16] containing the sign-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi16(__m128i __V)
{
@@ -336,6 +1341,26 @@ _mm256_cvtepi8_epi16(__m128i __V)
return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
}
/// Sign-extends bytes from the lower half of the 128-bit integer vector in
/// \a __V and returns the 32-bit values in the corresponding elements of a
/// 256-bit vector of [8 x i32].
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*8
/// k := i*32
/// result[k+31:k] := SignExtend(__V[j+7:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVSXBD instruction.
///
/// \param __V
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [8 x i32] containing the sign-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi32(__m128i __V)
{
@@ -344,6 +1369,25 @@ _mm256_cvtepi8_epi32(__m128i __V)
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
}
/// Sign-extends the first four bytes from the 128-bit integer vector in
/// \a __V and returns the 64-bit values in the corresponding elements of a
/// 256-bit vector of [4 x i64].
///
/// \code{.operation}
/// result[63:0] := SignExtend(__V[7:0])
/// result[127:64] := SignExtend(__V[15:8])
/// result[191:128] := SignExtend(__V[23:16])
/// result[255:192] := SignExtend(__V[31:24])
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVSXBQ instruction.
///
/// \param __V
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi64(__m128i __V)
{
@@ -352,357 +1396,1571 @@ _mm256_cvtepi8_epi64(__m128i __V)
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
}
/// Sign-extends 16-bit elements from the 128-bit vector of [8 x i16] in
/// \a __V and returns the 32-bit values in the corresponding elements of a
/// 256-bit vector of [8 x i32].
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*16
/// k := i*32
/// result[k+31:k] := SignExtend(__V[j+15:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVSXWD instruction.
///
/// \param __V
/// A 128-bit vector of [8 x i16] containing the source values.
/// \returns A 256-bit vector of [8 x i32] containing the sign-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi16_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
}
/// Sign-extends 16-bit elements from the lower half of the 128-bit vector of
/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding
/// elements of a 256-bit vector of [4 x i64].
///
/// \code{.operation}
/// result[63:0] := SignExtend(__V[15:0])
/// result[127:64] := SignExtend(__V[31:16])
/// result[191:128] := SignExtend(__V[47:32])
/// result[255:192] := SignExtend(__V[63:48])
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVSXWQ instruction.
///
/// \param __V
/// A 128-bit vector of [8 x i16] containing the source values.
/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi16_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
}
/// Sign-extends 32-bit elements from the 128-bit vector of [4 x i32] in
/// \a __V and returns the 64-bit values in the corresponding elements of a
/// 256-bit vector of [4 x i64].
///
/// \code{.operation}
/// result[63:0] := SignExtend(__V[31:0])
/// result[127:64] := SignExtend(__V[63:32])
/// result[191:128] := SignExtend(__V[95:64])
/// result[255:192] := SignExtend(__V[127:96])
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVSXDQ instruction.
///
/// \param __V
/// A 128-bit vector of [4 x i32] containing the source values.
/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi32_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
}
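// Example (illustrative sketch, not part of the upstream header): widening
// eight signed 16-bit samples to 32 bits before further arithmetic. Assumes
// <immintrin.h> and an AVX2 target; demo_widen_i16 is a hypothetical name.
static __inline__ __m256i demo_widen_i16(const short *p)
{
  __m128i v = _mm_loadu_si128((const __m128i *)p); /* 8 x i16, unaligned */
  return _mm256_cvtepi16_epi32(v);                 /* 8 x i32, sign-extended */
}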
/// Zero-extends bytes from the 128-bit integer vector in \a __V and returns
/// the 16-bit values in the corresponding elements of a 256-bit vector
/// of [16 x i16].
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*8
/// k := i*16
/// result[k+15:k] := ZeroExtend(__V[j+7:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVZXBW instruction.
///
/// \param __V
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [16 x i16] containing the zero-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi16(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
}
/// Zero-extends bytes from the lower half of the 128-bit integer vector in
/// \a __V and returns the 32-bit values in the corresponding elements of a
/// 256-bit vector of [8 x i32].
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*8
/// k := i*32
/// result[k+31:k] := ZeroExtend(__V[j+7:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVZXBD instruction.
///
/// \param __V
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [8 x i32] containing the zero-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
}
/// Zero-extends the first four bytes from the 128-bit integer vector in
/// \a __V and returns the 64-bit values in the corresponding elements of a
/// 256-bit vector of [4 x i64].
///
/// \code{.operation}
/// result[63:0] := ZeroExtend(__V[7:0])
/// result[127:64] := ZeroExtend(__V[15:8])
/// result[191:128] := ZeroExtend(__V[23:16])
/// result[255:192] := ZeroExtend(__V[31:24])
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVZXBQ instruction.
///
/// \param __V
/// A 128-bit integer vector containing the source bytes.
/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
}
/// Zero-extends 16-bit elements from the 128-bit vector of [8 x i16] in
/// \a __V and returns the 32-bit values in the corresponding elements of a
/// 256-bit vector of [8 x i32].
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*16
/// k := i*32
/// result[k+31:k] := ZeroExtend(__V[j+15:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVZXWD instruction.
///
/// \param __V
/// A 128-bit vector of [8 x i16] containing the source values.
/// \returns A 256-bit vector of [8 x i32] containing the zero-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu16_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
}
/// Zero-extends 16-bit elements from the lower half of the 128-bit vector of
/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding
/// elements of a 256-bit vector of [4 x i64].
///
/// \code{.operation}
/// result[63:0] := ZeroExtend(__V[15:0])
/// result[127:64] := ZeroExtend(__V[31:16])
/// result[191:128] := ZeroExtend(__V[47:32])
/// result[255:192] := ZeroExtend(__V[63:48])
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVZXWQ instruction.
///
/// \param __V
/// A 128-bit vector of [8 x i16] containing the source values.
/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu16_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
}
/// Zero-extends 32-bit elements from the 128-bit vector of [4 x i32] in
/// \a __V and returns the 64-bit values in the corresponding elements of a
/// 256-bit vector of [4 x i64].
///
/// \code{.operation}
/// result[63:0] := ZeroExtend(__V[31:0])
/// result[127:64] := ZeroExtend(__V[63:32])
/// result[191:128] := ZeroExtend(__V[95:64])
/// result[255:192] := ZeroExtend(__V[127:96])
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMOVZXDQ instruction.
///
/// \param __V
/// A 128-bit vector of [4 x i32] containing the source values.
/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu32_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
}
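// Example (illustrative sketch, not part of the upstream header): the byte
// 0x80 widens to 128 under zero-extension but to -128 under sign-extension.
// Assumes <immintrin.h> and an AVX2 target; demo_extend_bytes is a
// hypothetical name.
static __inline__ void demo_extend_bytes(void)
{
  __m128i v = _mm_set1_epi8((char)0x80);
  __m256i u = _mm256_cvtepu8_epi16(v); /* every 16-bit element == 128 */
  __m256i s = _mm256_cvtepi8_epi16(v); /* every 16-bit element == -128 */
  (void)u; (void)s;
}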
/// Multiplies signed 32-bit integers from even-numbered elements of two
/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the
/// [4 x i64] result.
///
/// \code{.operation}
/// result[63:0] := __a[31:0] * __b[31:0]
/// result[127:64] := __a[95:64] * __b[95:64]
/// result[191:128] := __a[159:128] * __b[159:128]
/// result[255:192] := __a[223:192] * __b[223:192]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMULDQ instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \returns A 256-bit vector of [4 x i64] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mul_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
}
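// Example (illustrative sketch, not part of the upstream header): because
// _mm256_mul_epi32 reads only the even-indexed 32-bit elements, products of
// the odd elements take a second call with both inputs shifted down by 32
// bits (the instruction ignores the upper half of each 64-bit element).
// Assumes <immintrin.h> and an AVX2 target; demo_mul_odd_epi32 is a
// hypothetical name.
static __inline__ __m256i demo_mul_odd_epi32(__m256i a, __m256i b)
{
  __m256i ah = _mm256_srli_epi64(a, 32); /* odd elements into even slots */
  __m256i bh = _mm256_srli_epi64(b, 32);
  return _mm256_mul_epi32(ah, bh);       /* products of elements 1, 3, 5, 7 */
}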
/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
/// [16 x i16], truncates the 32-bit results to the most significant 18
/// bits, rounds by adding 1, and returns bits [16:1] of each rounded
/// product in the [16 x i16] result.
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*16
/// temp := ((__a[j+15:j] * __b[j+15:j]) >> 14) + 1
/// result[j+15:j] := temp[16:1]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMULHRSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the rounded products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
}
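// Example (illustrative sketch, not part of the upstream header): this is
// the usual rounded Q15 fixed-point multiply; scaling by 0.5 uses the Q15
// constant 0x4000. Assumes <immintrin.h> and an AVX2 target; demo_q15_halve
// is a hypothetical name.
static __inline__ __m256i demo_q15_halve(__m256i samples)
{
  __m256i half = _mm256_set1_epi16(0x4000);  /* 0.5 in Q15 */
  return _mm256_mulhrs_epi16(samples, half); /* x * 0.5, rounded */
}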
/// Multiplies unsigned 16-bit integer elements of two 256-bit vectors of
/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the
/// [16 x i16] result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMULHUW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhi_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
}
/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the
/// [16 x i16] result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMULHW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhi_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
}
/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
/// [16 x i16], and returns the lower 16 bits of each 32-bit product in the
/// [16 x i16] result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMULLW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a * (__v16hu)__b);
}
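// Example (illustrative sketch, not part of the upstream header): pairing
// the low and high halves recovers the full 32-bit products of 16-bit
// inputs; the unpack intrinsics used here are documented later in this
// header. Assumes <immintrin.h> and an AVX2 target; demo_full_products is a
// hypothetical name.
static __inline__ void demo_full_products(__m256i a, __m256i b,
                                          __m256i *even, __m256i *odd)
{
  __m256i lo = _mm256_mullo_epi16(a, b); /* bits 15:0 of each product */
  __m256i hi = _mm256_mulhi_epi16(a, b); /* bits 31:16 of each product */
  *even = _mm256_unpacklo_epi16(lo, hi); /* products 0-3 and 8-11 */
  *odd  = _mm256_unpackhi_epi16(lo, hi); /* products 4-7 and 12-15 */
}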
/// Multiplies signed 32-bit integer elements of two 256-bit vectors of
/// [8 x i32], and returns the lower 32 bits of each 64-bit product in the
/// [8 x i32] result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMULLD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \returns A 256-bit vector of [8 x i32] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi32 (__m256i __a, __m256i __b)
{
return (__m256i)((__v8su)__a * (__v8su)__b);
}
/// Multiplies unsigned 32-bit integers from even-numbered elements of two
/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the
/// [4 x i64] result.
///
/// \code{.operation}
/// result[63:0] := __a[31:0] * __b[31:0]
/// result[127:64] := __a[95:64] * __b[95:64]
/// result[191:128] := __a[159:128] * __b[159:128]
/// result[255:192] := __a[223:192] * __b[223:192]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMULUDQ instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \returns A 256-bit vector of [4 x i64] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mul_epu32(__m256i __a, __m256i __b)
{
return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
}
/// Computes the bitwise OR of the 256-bit integer vectors in \a __a and
/// \a __b.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPOR instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_or_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a | (__v4du)__b);
}
/// Computes four sum-of-absolute-differences (SAD) operations on sets of eight
/// unsigned 8-bit integers from the 256-bit integer vectors \a __a and
/// \a __b.
///
/// One SAD result is computed for each set of eight bytes from \a __a and
/// eight bytes from \a __b. The zero-extended SAD value is returned in the
/// corresponding 64-bit element of the result.
///
/// A single SAD operation takes the differences between the corresponding
/// bytes of \a __a and \a __b, takes the absolute value of each difference,
/// and sums these eight values to form one 16-bit result. This operation
/// is repeated four times with successive sets of eight bytes.
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*64
/// temp0 := ABS(__a[j+7:j] - __b[j+7:j])
/// temp1 := ABS(__a[j+15:j+8] - __b[j+15:j+8])
/// temp2 := ABS(__a[j+23:j+16] - __b[j+23:j+16])
/// temp3 := ABS(__a[j+31:j+24] - __b[j+31:j+24])
/// temp4 := ABS(__a[j+39:j+32] - __b[j+39:j+32])
/// temp5 := ABS(__a[j+47:j+40] - __b[j+47:j+40])
/// temp6 := ABS(__a[j+55:j+48] - __b[j+55:j+48])
/// temp7 := ABS(__a[j+63:j+56] - __b[j+63:j+56])
/// result[j+15:j] := temp0 + temp1 + temp2 + temp3 +
/// temp4 + temp5 + temp6 + temp7
/// result[j+63:j+16] := 0
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSADBW instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sad_epu8(__m256i __a, __m256i __b)
{
return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
}
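// Example (illustrative sketch, not part of the upstream header): summing
// the absolute byte differences of two 32-byte blocks, a common
// motion-estimation kernel. Assumes <immintrin.h> and an AVX2 target;
// demo_sad32 is a hypothetical name.
static __inline__ unsigned demo_sad32(const unsigned char *p,
                                      const unsigned char *q)
{
  __m256i a = _mm256_loadu_si256((const __m256i *)p);
  __m256i b = _mm256_loadu_si256((const __m256i *)q);
  __m256i s = _mm256_sad_epu8(a, b); /* four partial sums, one per i64 */
  __m128i t = _mm_add_epi64(_mm256_castsi256_si128(s),
                            _mm256_extracti128_si256(s, 1));
  t = _mm_add_epi64(t, _mm_unpackhi_epi64(t, t));
  return (unsigned)_mm_cvtsi128_si32(t);
}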
/// Shuffles 8-bit integers in the 256-bit integer vector \a __a according
/// to control information in the 256-bit integer vector \a __b, and
/// returns the 256-bit result. In effect there are two separate 128-bit
/// shuffles in the lower and upper halves.
///
/// \code{.operation}
/// FOR i := 0 TO 31
/// j := i*8
/// IF __b[j+7] == 1
/// result[j+7:j] := 0
/// ELSE
/// k := __b[j+3:j] * 8
/// IF i > 15
/// k := k + 128
/// FI
/// result[j+7:j] := __a[k+7:k]
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSHUFB instruction.
///
/// \param __a
/// A 256-bit integer vector containing source values.
/// \param __b
/// A 256-bit integer vector containing control information to determine
/// what goes into the corresponding byte of the result. If bit 7 of the
/// control byte is 1, the result byte is 0; otherwise, bits 3:0 of the
/// control byte specify the index (within the same 128-bit half) of \a __a
/// to copy to the result byte.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shuffle_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
}
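// Example (illustrative sketch, not part of the upstream header): reversing
// the bytes within each 128-bit half; each control byte indexes only within
// its own half, so no byte crosses the middle of the vector. Assumes
// <immintrin.h> and an AVX2 target; demo_bswap_halves is a hypothetical name.
static __inline__ __m256i demo_bswap_halves(__m256i v)
{
  const __m256i rev = _mm256_setr_epi8(
      15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
      15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
  return _mm256_shuffle_epi8(v, rev);
}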
/// Shuffles 32-bit integers from the 256-bit vector of [8 x i32] in \a a
/// according to control information in the integer literal \a imm, and
/// returns the 256-bit result. In effect there are two parallel 128-bit
/// shuffles in the lower and upper halves.
///
/// \code{.operation}
/// FOR i := 0 to 3
/// j := i*32
/// k := (imm >> i*2)[1:0] * 32
/// result[j+31:j] := a[k+31:k]
/// result[128+j+31:128+j] := a[128+k+31:128+k]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_shuffle_epi32(__m256i a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the \c VPSHUFD instruction.
///
/// \param a
/// A 256-bit vector of [8 x i32] containing source values.
/// \param imm
/// An immediate 8-bit value specifying which elements to copy from \a a.
/// \a imm[1:0] specifies the index in \a a for elements 0 and 4 of the
/// result, \a imm[3:2] specifies the index for elements 1 and 5, and so
/// forth.
/// \returns A 256-bit vector of [8 x i32] containing the result.
#define _mm256_shuffle_epi32(a, imm) \
((__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm)))
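// Example (illustrative sketch, not part of the upstream header): reversing
// the four 32-bit elements within each 128-bit half. _MM_SHUFFLE(0, 1, 2, 3)
// assigns source index 3 to result element 0, index 2 to element 1, and so
// on. Assumes <immintrin.h> and an AVX2 target; demo_reverse_dwords is a
// hypothetical name.
static __inline__ __m256i demo_reverse_dwords(__m256i v)
{
  return _mm256_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3));
}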
/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] in \a a
/// according to control information in the integer literal \a imm, and
/// returns the 256-bit result. The upper 64 bits of each 128-bit half
/// are shuffled in parallel; the lower 64 bits of each 128-bit half are
/// copied from \a a unchanged.
///
/// \code{.operation}
/// result[63:0] := a[63:0]
/// result[191:128] := a[191:128]
/// FOR i := 0 TO 3
/// j := i * 16 + 64
/// k := (imm >> i*2)[1:0] * 16 + 64
/// result[j+15:j] := a[k+15:k]
/// result[128+j+15:128+j] := a[128+k+15:128+k]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_shufflehi_epi16(__m256i a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the \c VPSHUFHW instruction.
///
/// \param a
/// A 256-bit vector of [16 x i16] containing source values.
/// \param imm
/// An immediate 8-bit value specifying which elements to copy from \a a.
/// \a imm[1:0] specifies the index in \a a for elements 4 and 12 of the
/// result, \a imm[3:2] specifies the index for elements 5 and 13, and so
/// forth. Indexes are offset by 4 within each 128-bit half (so 0 means
/// index 4 in the lower half and index 12 in the upper half).
/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_shufflehi_epi16(a, imm) \
((__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm)))
/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] \a a
/// according to control information in the integer literal \a imm, and
/// returns the 256-bit [16 x i16] result. The lower 64 bits of each
/// 128-bit half are shuffled; the upper 64 bits of each 128-bit half are
/// copied from \a a unchanged.
///
/// \code{.operation}
/// result[127:64] := a[127:64]
/// result[255:192] := a[255:192]
/// FOR i := 0 TO 3
/// j := i * 16
/// k := (imm >> i*2)[1:0] * 16
/// result[j+15:j] := a[k+15:k]
/// result[128+j+15:128+j] := a[128+k+15:128+k]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_shufflelo_epi16(__m256i a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the \c VPSHUFLW instruction.
///
/// \param a
/// A 256-bit vector of [16 x i16] to use as a source of data for the
/// result.
/// \param imm
/// An immediate 8-bit value specifying which elements to copy from \a a.
/// \a imm[1:0] specifies the index in \a a for elements 0 and 8 of the
/// result, \a imm[3:2] specifies the index for elements 1 and 9, and so
/// forth.
/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_shufflelo_epi16(a, imm) \
((__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm)))
/// Sets each byte of the result to the corresponding byte of the 256-bit
/// integer vector in \a __a, the negative of that byte, or zero, depending
/// on whether the corresponding byte of the 256-bit integer vector in
/// \a __b is greater than zero, less than zero, or equal to zero,
/// respectively.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSIGNB instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
}
/// Sets each element of the result to the corresponding element of the
/// 256-bit vector of [16 x i16] in \a __a, the negative of that element,
/// or zero, depending on whether the corresponding element of the 256-bit
/// vector of [16 x i16] in \a __b is greater than zero, less than zero, or
/// equal to zero, respectively.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSIGNW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16].
/// \param __b
/// A 256-bit vector of [16 x i16].
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
}
/// Sets each element of the result to the corresponding element of the
/// 256-bit vector of [8 x i32] in \a __a, the negative of that element, or
/// zero, depending on whether the corresponding element of the 256-bit
/// vector of [8 x i32] in \a __b is greater than zero, less than zero, or
/// equal to zero, respectively.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSIGND instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32].
/// \param __b
/// A 256-bit vector of [8 x i32].
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
}
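// Example (illustrative sketch, not part of the upstream header): signing a
// vector by itself is a compact absolute value: positives pass through,
// negatives are negated, zeros stay zero (INT_MIN wraps to itself, as in
// any two's-complement abs). Assumes <immintrin.h> and an AVX2 target;
// demo_abs_via_sign is a hypothetical name.
static __inline__ __m256i demo_abs_via_sign(__m256i v)
{
  return _mm256_sign_epi32(v, v);
}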
/// Shifts each 128-bit half of the 256-bit integer vector \a a left by
/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm
/// is greater than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_slli_si256(__m256i a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the \c VPSLLDQ instruction.
///
/// \param a
/// A 256-bit integer vector to be shifted.
/// \param imm
/// An unsigned immediate value specifying the shift count (in bytes).
/// \returns A 256-bit integer vector containing the result.
#define _mm256_slli_si256(a, imm) \
((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
/// Shifts each 128-bit half of the 256-bit integer vector \a a left by
/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm
/// is greater than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_bslli_epi128(__m256i a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the \c VPSLLDQ instruction.
///
/// \param a
/// A 256-bit integer vector to be shifted.
/// \param imm
/// An unsigned immediate value specifying the shift count (in bytes).
/// \returns A 256-bit integer vector containing the result.
#define _mm256_bslli_epi128(a, imm) \
((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
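// Example (illustrative sketch, not part of the upstream header): the byte
// shift acts on each 128-bit half independently, so the top four bytes of
// each half are discarded rather than carried across the middle. Assumes
// <immintrin.h> and an AVX2 target; demo_byteshift is a hypothetical name.
static __inline__ __m256i demo_byteshift(__m256i v)
{
  /* Each half: {b0..b15} -> {0,0,0,0,b0..b11}. */
  return _mm256_slli_si256(v, 4);
}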
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// left by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
}
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// left by the number of bits specified by the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// left by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 31, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// left by the number of bits given in the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 31, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
}
/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
/// left by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 63, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi64(__m256i __a, int __count)
{
return __builtin_ia32_psllqi256((__v4di)__a, __count);
}
/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
/// left by the number of bits given in the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 63, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi64(__m256i __a, __m128i __count)
{
return __builtin_ia32_psllq256((__v4di)__a, __count);
}
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// right by \a __count bits, shifting in sign bits, and returns the result.
/// If \a __count is greater than 15, each element of the result is either
/// 0 or -1 according to the corresponding input sign bit.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRAW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
}
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// right by the number of bits given in the lower 64 bits of \a __count,
/// shifting in sign bits, and returns the result. If \a __count is greater
/// than 15, each element of the result is either 0 or -1 according to the
/// corresponding input sign bit.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRAW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sra_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// right by \a __count bits, shifting in sign bits, and returns the result.
/// If \a __count is greater than 31, each element of the result is either
/// 0 or -1 according to the corresponding input sign bit.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRAD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// right by the number of bits given in the lower 64 bits of \a __count,
/// shifting in sign bits, and returns the result. If \a __count is greater
/// than 31, each element of the result is either 0 or -1 according to the
/// corresponding input sign bit.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRAD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sra_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
}
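// Example (illustrative sketch, not part of the upstream header): arithmetic
// vs. logical right shift of a negative value; the logical form appears
// later in this header. Assumes <immintrin.h> and an AVX2 target;
// demo_shift_kinds is a hypothetical name.
static __inline__ void demo_shift_kinds(void)
{
  __m256i v = _mm256_set1_epi32(-8);   /* 0xFFFFFFF8 */
  __m256i a = _mm256_srai_epi32(v, 2); /* sign bits shift in: -2 */
  __m256i l = _mm256_srli_epi32(v, 2); /* zero bits shift in: 0x3FFFFFFE */
  (void)a; (void)l;
}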
/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by
/// \a imm bytes, shifting in zero bytes, and returns the result. If
/// \a imm is greater than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_srli_si256(__m256i a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the \c VPSRLDQ instruction.
///
/// \param a
/// A 256-bit integer vector to be shifted.
/// \param imm
/// An unsigned immediate value specifying the shift count (in bytes).
/// \returns A 256-bit integer vector containing the result.
#define _mm256_srli_si256(a, imm) \
((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))
/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by
/// \a imm bytes, shifting in zero bytes, and returns the result. If
/// \a imm is greater than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_bsrli_epi128(__m256i a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the \c VPSRLDQ instruction.
///
/// \param a
/// A 256-bit integer vector to be shifted.
/// \param imm
/// An unsigned immediate value specifying the shift count (in bytes).
/// \returns A 256-bit integer vector containing the result.
#define _mm256_bsrli_epi128(a, imm) \
((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// right by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
}
/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
/// right by the number of bits given in the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 15, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// right by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 31, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
/// right by the number of bits given in the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 31, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
}
/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
/// right by \a __count bits, shifting in zero bits, and returns the result.
/// If \a __count is greater than 63, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] to be shifted.
/// \param __count
/// An unsigned integer value specifying the shift count (in bits).
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi64(__m256i __a, int __count)
{
return __builtin_ia32_psrlqi256((__v4di)__a, __count);
}
/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
/// right by the number of bits given in the lower 64 bits of \a __count,
/// shifting in zero bits, and returns the result. If \a __count is greater
/// than 63, the returned result is all zeroes.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] to be shifted.
/// \param __count
/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
/// shift count (in bits). The upper element is ignored.
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi64(__m256i __a, __m128i __count)
{
return __builtin_ia32_psrlq256((__v4di)__a, __count);
}
/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
/// vectors. Returns the lower 8 bits of each difference in the
/// corresponding byte of the 256-bit integer vector result (overflow is
/// ignored).
///
/// \code{.operation}
/// FOR i := 0 TO 31
/// j := i*8
/// result[j+7:j] := __a[j+7:j] - __b[j+7:j]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBB instruction.
///
/// \param __a
/// A 256-bit integer vector containing the minuends.
/// \param __b
/// A 256-bit integer vector containing the subtrahends.
/// \returns A 256-bit integer vector containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi8(__m256i __a, __m256i __b)
{
return (__m256i)((__v32qu)__a - (__v32qu)__b);
}
/// Subtracts 16-bit integers from corresponding elements of two 256-bit
/// vectors of [16 x i16]. Returns the lower 16 bits of each difference in
/// the corresponding element of the [16 x i16] result (overflow is
/// ignored).
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*16
/// result[j+15:j] := __a[j+15:j] - __b[j+15:j]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing the minuends.
/// \param __b
/// A 256-bit vector of [16 x i16] containing the subtrahends.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a - (__v16hu)__b);
}
/// Subtracts 32-bit integers from corresponding elements of two 256-bit
/// vectors of [8 x i32]. Returns the lower 32 bits of each difference in
/// the corresponding element of the [8 x i32] result (overflow is ignored).
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*32
/// result[j+31:j] := __a[j+31:j] - __b[j+31:j]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] containing the minuends.
/// \param __b
/// A 256-bit vector of [8 x i32] containing the subtrahends.
/// \returns A 256-bit vector of [8 x i32] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8su)__a - (__v8su)__b);
}
/// Subtracts 64-bit integers from corresponding elements of two 256-bit
/// vectors of [4 x i64]. Returns the lower 64 bits of each difference in
/// the corresponding element of the [4 x i64] result (overflow is ignored).
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*64
/// result[j+63:j] := __a[j+63:j] - __b[j+63:j]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] containing the minuends.
/// \param __b
/// A 256-bit vector of [4 x i64] containing the subtrahends.
/// \returns A 256-bit vector of [4 x i64] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a - (__v4du)__b);
}
/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
/// vectors using signed saturation, and returns each difference in the
/// corresponding byte of the 256-bit integer vector result.
///
/// \code{.operation}
/// FOR i := 0 TO 31
/// j := i*8
/// result[j+7:j] := SATURATE8(__a[j+7:j] - __b[j+7:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBSB instruction.
///
/// \param __a
/// A 256-bit integer vector containing the minuends.
/// \param __b
/// A 256-bit integer vector containing the subtrahends.
/// \returns A 256-bit integer vector containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b);
}
/// Subtracts 16-bit integers from corresponding elements of two 256-bit
/// vectors of [16 x i16] using signed saturation, and returns each
/// difference in the corresponding element of the [16 x i16] result.
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*16
/// result[j+15:j] := SATURATE16(__a[j+15:j] - __b[j+15:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing the minuends.
/// \param __b
/// A 256-bit vector of [16 x i16] containing the subtrahends.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b);
}
/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
/// vectors using unsigned saturation, and returns each difference in the
/// corresponding byte of the 256-bit integer vector result. For each byte,
/// computes <c> result = __a - __b </c>.
///
/// \code{.operation}
/// FOR i := 0 TO 31
/// j := i*8
/// result[j+7:j] := SATURATE8U(__a[j+7:j] - __b[j+7:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBUSB instruction.
///
/// \param __a
/// A 256-bit integer vector containing the minuends.
/// \param __b
/// A 256-bit integer vector containing the subtrahends.
/// \returns A 256-bit integer vector containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b);
}
/// Subtracts 16-bit integers from corresponding elements of two 256-bit
/// vectors of [16 x i16] using unsigned saturation, and returns each
/// difference in the corresponding element of the [16 x i16] result.
///
/// \code{.operation}
/// FOR i := 0 TO 15
/// j := i*16
/// result[j+15:j] := SATURATE16U(__a[j+15:j] - __b[j+15:j])
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSUBUSW instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] containing the minuends.
/// \param __b
/// A 256-bit vector of [16 x i16] containing the subtrahends.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b);
}
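// Example (illustrative sketch, not part of the upstream header): wrapping
// vs. saturating subtraction of unsigned bytes. Assumes <immintrin.h> and an
// AVX2 target; demo_sub_kinds is a hypothetical name.
static __inline__ void demo_sub_kinds(void)
{
  __m256i a = _mm256_set1_epi8(10);
  __m256i b = _mm256_set1_epi8(20);
  __m256i w = _mm256_sub_epi8(a, b);  /* wraps: 10 - 20 == 246 (0xF6) */
  __m256i s = _mm256_subs_epu8(a, b); /* saturates: 10 - 20 clamps to 0 */
  (void)w; (void)s;
}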
/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer
/// vectors in \a __a and \a __b to form the 256-bit result. Specifically,
/// uses the upper 64 bits of each 128-bit half of \a __a and \a __b as
/// input; other bits in these parameters are ignored.
///
/// \code{.operation}
/// result[7:0] := __a[71:64]
/// result[15:8] := __b[71:64]
/// result[23:16] := __a[79:72]
/// result[31:24] := __b[79:72]
/// . . .
/// result[127:120] := __b[127:120]
/// result[135:128] := __a[199:192]
/// . . .
/// result[255:248] := __b[255:248]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKHBW instruction.
///
/// \param __a
/// A 256-bit integer vector used as the source for the even-numbered bytes
/// of the result.
/// \param __b
/// A 256-bit integer vector used as the source for the odd-numbered bytes
/// of the result.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
}
/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors
/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit
/// vector of [16 x i16]. Specifically, uses the upper 64 bits of each
/// 128-bit half of \a __a and \a __b as input; other bits in these
/// parameters are ignored.
///
/// \code{.operation}
/// result[15:0] := __a[79:64]
/// result[31:16] := __b[79:64]
/// result[47:32] := __a[95:80]
/// result[63:48] := __b[95:80]
/// . . .
/// result[127:112] := __b[127:112]
/// result[143:128] := __a[207:192]
/// . . .
/// result[255:240] := __b[255:240]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKHWD instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
}
/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors
/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector
/// of [8 x i32]. Specifically, uses the upper 64 bits of each 128-bit half
/// of \a __a and \a __b as input; other bits in these parameters are
/// ignored.
///
/// \code{.operation}
/// result[31:0] := __a[95:64]
/// result[63:32] := __b[95:64]
/// result[95:64] := __a[127:96]
/// result[127:96] := __b[127:96]
/// result[159:128] := __a[223:192]
/// result[191:160] := __b[223:192]
/// result[223:192] := __a[255:224]
/// result[255:224] := __b[255:224]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKHDQ instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
}
/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors
/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector
/// of [4 x i64]. Specifically, uses the upper 64 bits of each 128-bit half
/// of \a __a and \a __b as input; other bits in these parameters are
/// ignored.
///
/// \code{.operation}
/// result[63:0] := __a[127:64]
/// result[127:64] := __b[127:64]
/// result[191:128] := __a[255:192]
/// result[255:192] := __b[255:192]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKHQDQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
}
/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer
/// vectors in \a __a and \a __b to form the 256-bit result. Specifically,
/// uses the lower 64 bits of each 128-bit half of \a __a and \a __b as
/// input; other bits in these parameters are ignored.
///
/// \code{.operation}
/// result[7:0] := __a[7:0]
/// result[15:8] := __b[7:0]
/// result[23:16] := __a[15:8]
/// result[31:24] := __b[15:8]
/// . . .
/// result[127:120] := __b[63:56]
/// result[135:128] := __a[135:128]
/// . . .
/// result[255:248] := __b[191:184]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKLBW instruction.
///
/// \param __a
/// A 256-bit integer vector used as the source for the even-numbered bytes
/// of the result.
/// \param __b
/// A 256-bit integer vector used as the source for the odd-numbered bytes
/// of the result.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
}
/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors
/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit
/// vector of [16 x i16]. Specifically, uses the lower 64 bits of each
/// 128-bit half of \a __a and \a __b as input; other bits in these
/// parameters are ignored.
///
/// \code{.operation}
/// result[15:0] := __a[15:0]
/// result[31:16] := __b[15:0]
/// result[47:32] := __a[31:16]
/// result[63:48] := __b[31:16]
/// . . .
/// result[127:112] := __b[63:48]
/// result[143:128] := __a[143:128]
/// . . .
/// result[255:240] := __b[191:176]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKLWD instruction.
///
/// \param __a
/// A 256-bit vector of [16 x i16] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
}
/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors
/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector
/// of [8 x i32]. Specifically, uses the lower 64 bits of each 128-bit half
/// of \a __a and \a __b as input; other bits in these parameters are
/// ignored.
///
/// \code{.operation}
/// result[31:0] := __a[31:0]
/// result[63:32] := __b[31:0]
/// result[95:64] := __a[63:32]
/// result[127:96] := __b[63:32]
/// result[159:128] := __a[159:128]
/// result[191:160] := __b[159:128]
/// result[223:192] := __a[191:160]
/// result[255:224] := __b[191:160]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKLDQ instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
}
/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors
/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector
/// of [4 x i64]. Specifically, uses the lower 64 bits of each 128-bit half
/// of \a __a and \a __b as input; other bits in these parameters are
/// ignored.
///
/// \code{.operation}
/// result[63:0] := __a[63:0]
/// result[127:64] := __b[63:0]
/// result[191:128] := __a[191:128]
/// result[255:192] := __b[191:128]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPUNPCKLQDQ instruction.
///
/// \param __a
/// A 256-bit vector of [4 x i64] used as the source for the even-numbered
/// elements of the result.
/// \param __b
/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered
/// elements of the result.
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
}
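// Editor's usage sketch (illustrative, not part of the original header): the
// unpack intrinsics interleave within each 128-bit lane, not across the full
// register, so two splatted sources yield alternating pairs in each lane.
// The __example_* names are hypothetical.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_unpacklo_epi64(void)
{
  __m256i __ones = _mm256_set1_epi64x(1);
  __m256i __twos = _mm256_set1_epi64x(2);
  return _mm256_unpacklo_epi64(__ones, __twos); // {1, 2, 1, 2}
}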
/// Computes the bitwise XOR of the 256-bit integer vectors in \a __a and
/// \a __b.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPXOR instruction.
///
/// \param __a
/// A 256-bit integer vector.
/// \param __b
/// A 256-bit integer vector.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_xor_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a ^ (__v4du)__b);
}
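// Editor's sketch (illustrative only): XOR-ing a register with itself is the
// standard idiom for materializing an all-zeros vector without a load.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_zero_via_xor(__m256i __v)
{
  return _mm256_xor_si256(__v, __v); // every bit cancels; result is zero
}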
/// Loads the 256-bit integer vector from memory \a __V using a non-temporal
/// memory hint and returns the vector. \a __V must be aligned on a 32-byte
/// boundary.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VMOVNTDQA instruction.
///
/// \param __V
/// A pointer to the 32-byte aligned memory containing the vector to load.
/// \returns A 256-bit integer vector loaded from memory.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_stream_load_si256(__m256i const *__V)
{
@@ -710,30 +2968,84 @@ _mm256_stream_load_si256(__m256i const *__V)
return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V);
}
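// Editor's sketch (illustrative only): the non-temporal load requires 32-byte
// alignment, which a Clang/GCC aligned attribute can guarantee.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_stream_load(void)
{
  static const long long __buf[4] __attribute__((aligned(32))) = {1, 2, 3, 4};
  return _mm256_stream_load_si256((const __m256i *)__buf);
}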
/// Broadcasts the 32-bit floating-point value from the low element of the
/// 128-bit vector of [4 x float] in \a __X to all elements of the result's
/// 128-bit vector of [4 x float].
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VBROADCASTSS instruction.
///
/// \param __X
/// A 128-bit vector of [4 x float] whose low element will be broadcast.
/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_broadcastss_ps(__m128 __X)
{
return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
}
/// Broadcasts the 64-bit floating-point value from the low element of the
/// 128-bit vector of [2 x double] in \a __a to both elements of the
/// result's 128-bit vector of [2 x double].
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c MOVDDUP instruction.
///
/// \param __a
/// A 128-bit vector of [2 x double] whose low element will be broadcast.
/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_broadcastsd_pd(__m128d __a)
{
return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
}
/// Broadcasts the 32-bit floating-point value from the low element of the
/// 128-bit vector of [4 x float] in \a __X to all elements of the
/// result's 256-bit vector of [8 x float].
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VBROADCASTSS instruction.
///
/// \param __X
/// A 128-bit vector of [4 x float] whose low element will be broadcast.
/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_broadcastss_ps(__m128 __X)
{
return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
/// Broadcasts the 64-bit floating-point value from the low element of the
/// 128-bit vector of [2 x double] in \a __X to all elements of the
/// result's 256-bit vector of [4 x double].
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VBROADCASTSD instruction.
///
/// \param __X
/// A 128-bit vector of [2 x double] whose low element will be broadcast.
/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_broadcastsd_pd(__m128d __X)
{
return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
}
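// Editor's sketch (illustrative only): broadcasting the low scalar of a
// 128-bit vector is the usual way to splat one float across all eight lanes.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
__example_splat_float(float __f)
{
  return _mm256_broadcastss_ps(_mm_set_ss(__f)); // {__f, __f, ..., __f}
}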
/// Broadcasts the 128-bit integer data from \a __X to both the lower and
/// upper halves of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VBROADCASTI128 instruction.
///
/// \param __X
/// A 128-bit integer vector to be broadcast.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastsi128_si256(__m128i __X)
{
@@ -742,295 +3054,1688 @@ _mm256_broadcastsi128_si256(__m128i __X)
#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
/// Merges 32-bit integer elements from either of the two 128-bit vectors of
/// [4 x i32] in \a V1 or \a V2 to the result's 128-bit vector of [4 x i32],
/// as specified by the immediate integer operand \a M.
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*32
/// IF M[i] == 0
/// result[31+j:j] := V1[31+j:j]
/// ELSE
/// result[31+j:j] := V2[31+j:j]
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_blend_epi32(__m128i V1, __m128i V2, const int M);
/// \endcode
///
/// This intrinsic corresponds to the \c VPBLENDD instruction.
///
/// \param V1
/// A 128-bit vector of [4 x i32] containing source values.
/// \param V2
/// A 128-bit vector of [4 x i32] containing source values.
/// \param M
/// An immediate 8-bit integer operand, with bits [3:0] specifying the
/// source for each element of the result. The position of the mask bit
/// corresponds to the index of a copied value. When a mask bit is 0, the
/// element is copied from \a V1; otherwise, it is copied from \a V2.
/// \returns A 128-bit vector of [4 x i32] containing the result.
#define _mm_blend_epi32(V1, V2, M) \
((__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
(__v4si)(__m128i)(V2), (int)(M)))
/// Merges 32-bit integer elements from either of the two 256-bit vectors of
/// [8 x i32] in \a V1 or \a V2 to return a 256-bit vector of [8 x i32],
/// as specified by the immediate integer operand \a M.
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*32
/// IF M[i] == 0
/// result[31+j:j] := V1[31+j:j]
/// ELSE
/// result[31+j:j] := V2[31+j:j]
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_blend_epi32(__m256i V1, __m256i V2, const int M);
/// \endcode
///
/// This intrinsic corresponds to the \c VPBLENDD instruction.
///
/// \param V1
/// A 256-bit vector of [8 x i32] containing source values.
/// \param V2
/// A 256-bit vector of [8 x i32] containing source values.
/// \param M
/// An immediate 8-bit integer operand, with bits [7:0] specifying the
/// source for each element of the result. The position of the mask bit
/// corresponds to the index of a copied value. When a mask bit is 0, the
/// element is copied from \a V1; otherwise, it is copied from \a V2.
/// \returns A 256-bit vector of [8 x i32] containing the result.
#define _mm256_blend_epi32(V1, V2, M) \
((__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
(__v8si)(__m256i)(V2), (int)(M)))
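// Editor's sketch (illustrative only): M must be a literal constant; with
// M = 0xF0 the low four elements come from the first operand and the high
// four from the second.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_blend_halves(__m256i __lo_src, __m256i __hi_src)
{
  return _mm256_blend_epi32(__lo_src, __hi_src, 0xF0);
}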
/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all
/// bytes of the 256-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPBROADCASTB instruction.
///
/// \param __X
/// A 128-bit integer vector whose low byte will be broadcast.
/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastb_epi8(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
/// Broadcasts the low element from the 128-bit vector of [8 x i16] in \a __X
/// to all elements of the result's 256-bit vector of [16 x i16].
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPBROADCASTW instruction.
///
/// \param __X
/// A 128-bit vector of [8 x i16] whose low element will be broadcast.
/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastw_epi16(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X
/// to all elements of the result's 256-bit vector of [8 x i32].
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPBROADCASTD instruction.
///
/// \param __X
/// A 128-bit vector of [4 x i32] whose low element will be broadcast.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastd_epi32(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X
/// to all elements of the result's 256-bit vector of [4 x i64].
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPBROADCASTQ instruction.
///
/// \param __X
/// A 128-bit vector of [2 x i64] whose low element will be broadcast.
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastq_epi64(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
}
/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all
/// bytes of the 128-bit result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPBROADCASTB instruction.
///
/// \param __X
/// A 128-bit integer vector whose low byte will be broadcast.
/// \returns A 128-bit integer vector containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastb_epi8(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
/// Broadcasts the low element from the 128-bit vector of [8 x i16] in
/// \a __X to all elements of the result's 128-bit vector of [8 x i16].
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPBROADCASTW instruction.
///
/// \param __X
/// A 128-bit vector of [8 x i16] whose low element will be broadcast.
/// \returns A 128-bit vector of [8 x i16] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastw_epi16(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X
/// to all elements of the result's 128-bit vector of [4 x i32].
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPBROADCASTD instruction.
///
/// \param __X
/// A 128-bit vector of [4 x i32] whose low element will be broadcast.
/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastd_epi32(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
}
/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X
/// to both elements of the result's 128-bit vector of [2 x i64].
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPBROADCASTQ instruction.
///
/// \param __X
/// A 128-bit vector of [2 x i64] whose low element will be broadcast.
/// \returns A 128-bit vector of [2 x i64] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastq_epi64(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
}
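// Editor's sketch (illustrative only): moving a scalar into the low element
// of a 128-bit vector and broadcasting it splats the value across all lanes.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_splat_int(int __x)
{
  return _mm256_broadcastd_epi32(_mm_cvtsi32_si128(__x)); // all lanes == __x
}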
/// Sets the result's 256-bit vector of [8 x i32] to copies of elements of the
/// 256-bit vector of [8 x i32] in \a __a as specified by indexes in the
/// elements of the 256-bit vector of [8 x i32] in \a __b.
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*32
/// k := __b[j+2:j] * 32
/// result[j+31:j] := __a[k+31:k]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPERMD instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32] containing the source values.
/// \param __b
/// A 256-bit vector of [8 x i32] containing indexes of values to use from
/// \a __a.
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
}
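// Editor's sketch (illustrative only): unlike the in-lane shuffles, VPERMD
// moves elements across the full 256 bits, so a descending index vector
// reverses all eight elements in one instruction.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_reverse_epi32(__m256i __v)
{
  __m256i __rev = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);
  return _mm256_permutevar8x32_epi32(__v, __rev); // element order 7..0
}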
/// Sets the result's 256-bit vector of [4 x double] to copies of elements of
/// the 256-bit vector of [4 x double] in \a V as specified by the
/// immediate value \a M.
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*64
/// k := (M >> i*2)[1:0] * 64
/// result[j+63:j] := V[k+63:k]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256d _mm256_permute4x64_pd(__m256d V, const int M);
/// \endcode
///
/// This intrinsic corresponds to the \c VPERMPD instruction.
///
/// \param V
/// A 256-bit vector of [4 x double] containing the source values.
/// \param M
/// An immediate 8-bit value specifying which elements to copy from \a V.
/// \a M[1:0] specifies the index in \a V for element 0 of the result,
/// \a M[3:2] specifies the index for element 1, and so forth.
/// \returns A 256-bit vector of [4 x double] containing the result.
#define _mm256_permute4x64_pd(V, M) \
((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M)))
/// Sets the result's 256-bit vector of [8 x float] to copies of elements of
/// the 256-bit vector of [8 x float] in \a __a as specified by indexes in
/// the elements of the 256-bit vector of [8 x i32] in \a __b.
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*32
/// k := __b[j+2:j] * 32
/// result[j+31:j] := __a[k+31:k]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPERMPS instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing the source values.
/// \param __b
/// A 256-bit vector of [8 x i32] containing indexes of values to use from
/// \a __a.
/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
{
return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
}
/// Sets the result's 256-bit vector of [4 x i64] to copies of elements
/// of the 256-bit vector of [4 x i64] in \a V as specified by the
/// immediate value \a M.
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*64
/// k := (M >> i*2)[1:0] * 64
/// result[j+63:j] := V[k+63:k]
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_permute4x64_epi64(__m256i V, const int M);
/// \endcode
///
/// This intrinsic corresponds to the \c VPERMQ instruction.
///
/// \param V
/// A 256-bit vector of [4 x i64] containing the source values.
/// \param M
/// An immediate 8-bit value specifying which elements to copy from \a V.
/// \a M[1:0] specifies the index in \a V for element 0 of the result,
/// \a M[3:2] specifies the index for element 1, and so forth.
/// \returns A 256-bit vector of [4 x i64] containing the result.
#define _mm256_permute4x64_epi64(V, M) \
((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M)))
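// Editor's sketch (illustrative only): M = 0x1B packs the indexes 3,2,1,0
// into its four two-bit fields, reversing the 64-bit elements.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_reverse_epi64(__m256i __v)
{
  return _mm256_permute4x64_epi64(__v, 0x1B); // element order 3,2,1,0
}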
/// Sets each half of the 256-bit result either to zero or to one of the
/// four possible 128-bit halves of the 256-bit vectors \a V1 and \a V2,
/// as specified by the immediate value \a M.
///
/// \code{.operation}
/// FOR i := 0 TO 1
/// j := i*128
/// k := M >> (i*4)
/// IF k[3] == 0
/// CASE (k[1:0]) OF
/// 0: result[127+j:j] := V1[127:0]
/// 1: result[127+j:j] := V1[255:128]
/// 2: result[127+j:j] := V2[127:0]
/// 3: result[127+j:j] := V2[255:128]
/// ESAC
/// ELSE
/// result[127+j:j] := 0
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_permute2x128_si256(__m256i V1, __m256i V2, const int M);
/// \endcode
///
/// This intrinsic corresponds to the \c VPERM2I128 instruction.
///
/// \param V1
/// A 256-bit integer vector containing source values.
/// \param V2
/// A 256-bit integer vector containing source values.
/// \param M
/// An immediate value specifying how to form the result. Bits [3:0]
/// control the lower half of the result, bits [7:4] control the upper half.
/// Within each 4-bit control value, if bit 3 is 1, the result is zero,
/// otherwise bits [1:0] determine the source as follows. \n
/// 0: the lower half of \a V1 \n
/// 1: the upper half of \a V1 \n
/// 2: the lower half of \a V2 \n
/// 3: the upper half of \a V2
/// \returns A 256-bit integer vector containing the result.
#define _mm256_permute2x128_si256(V1, V2, M) \
((__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M)))
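// Editor's sketch (illustrative only): selecting half 1 then half 0 of the
// same source (M = 0x01) swaps the two 128-bit halves of a vector.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_swap_halves(__m256i __v)
{
  return _mm256_permute2x128_si256(__v, __v, 0x01);
}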
/// Extracts half of the 256-bit vector \a V to the 128-bit result. If bit 0
/// of the immediate \a M is zero, extracts the lower half of \a V;
/// otherwise, extracts the upper half.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm256_extracti128_si256(__m256i V, const int M);
/// \endcode
///
/// This intrinsic corresponds to the \c VEXTRACTI128 instruction.
///
/// \param V
/// A 256-bit integer vector containing the source values.
/// \param M
/// An immediate value specifying which half of \a V to extract.
/// \returns A 128-bit integer vector containing the result.
#define _mm256_extracti128_si256(V, M) \
((__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M)))
/// Copies the 256-bit vector \a V1 to the result, then overwrites half of the
/// result with the 128-bit vector \a V2. If bit 0 of the immediate \a M
/// is zero, overwrites the lower half of the result; otherwise,
/// overwrites the upper half.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_inserti128_si256(__m256i V1, __m128i V2, const int M);
/// \endcode
///
/// This intrinsic corresponds to the \c VINSERTI128 instruction.
///
/// \param V1
/// A 256-bit integer vector containing a source value.
/// \param V2
/// A 128-bit integer vector containing a source value.
/// \param M
/// An immediate value specifying where to put \a V2 in the result.
/// \returns A 256-bit integer vector containing the result.
#define _mm256_inserti128_si256(V1, V2, M) \
((__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \
(__v2di)(__m128i)(V2), (int)(M)))
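// Editor's sketch (illustrative only): extract the upper half, then reinsert
// it over the lower half; bit 0 of each immediate selects the half involved.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_copy_high_to_low(__m256i __v)
{
  __m128i __hi = _mm256_extracti128_si256(__v, 1); // upper 128 bits of __v
  return _mm256_inserti128_si256(__v, __hi, 0);    // overwrite the lower half
}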
/// Conditionally loads eight 32-bit integer elements from memory \a __X, if
/// the most significant bit of the corresponding element in the mask
/// \a __M is set; otherwise, sets that element of the result to zero.
/// Returns the 256-bit [8 x i32] result.
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*32
/// IF __M[j+31] == 1
/// result[j+31:j] := Load32(__X+(i*4))
/// ELSE
/// result[j+31:j] := 0
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
///
/// \param __X
/// A pointer to the memory used for loading values.
/// \param __M
/// A 256-bit vector of [8 x i32] containing the mask bits.
/// \returns A 256-bit vector of [8 x i32] containing the loaded or zeroed
/// elements.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskload_epi32(int const *__X, __m256i __M)
{
return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
}
/// Conditionally loads four 64-bit integer elements from memory \a __X, if
/// the most significant bit of the corresponding element in the mask
/// \a __M is set; otherwise, sets that element of the result to zero.
/// Returns the 256-bit [4 x i64] result.
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*64
/// IF __M[j+63] == 1
/// result[j+63:j] := Load64(__X+(i*8))
/// ELSE
/// result[j+63:j] := 0
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
///
/// \param __X
/// A pointer to the memory used for loading values.
/// \param __M
/// A 256-bit vector of [4 x i64] containing the mask bits.
/// \returns A 256-bit vector of [4 x i64] containing the loaded or zeroed
/// elements.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskload_epi64(long long const *__X, __m256i __M)
{
return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);
}
/// Conditionally loads four 32-bit integer elements from memory \a __X, if
/// the most significant bit of the corresponding element in the mask
/// \a __M is set; otherwise, sets that element of the result to zero.
/// Returns the 128-bit [4 x i32] result.
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*32
/// IF __M[j+31] == 1
/// result[j+31:j] := Load32(__X+(i*4))
/// ELSE
/// result[j+31:j] := 0
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
///
/// \param __X
/// A pointer to the memory used for loading values.
/// \param __M
/// A 128-bit vector of [4 x i32] containing the mask bits.
/// \returns A 128-bit vector of [4 x i32] containing the loaded or zeroed
/// elements.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskload_epi32(int const *__X, __m128i __M)
{
return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
}
/// Conditionally loads two 64-bit integer elements from memory \a __X, if
/// the most significant bit of the corresponding element in the mask
/// \a __M is set; otherwise, sets that element of the result to zero.
/// Returns the 128-bit [2 x i64] result.
///
/// \code{.operation}
/// FOR i := 0 TO 1
/// j := i*64
/// IF __M[j+63] == 1
/// result[j+63:j] := Load64(__X+(i*8))
/// ELSE
/// result[j+63:j] := 0
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
///
/// \param __X
/// A pointer to the memory used for loading values.
/// \param __M
/// A 128-bit vector of [2 x i64] containing the mask bits.
/// \returns A 128-bit vector of [2 x i64] containing the loaded or zeroed
/// elements.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskload_epi64(long long const *__X, __m128i __M)
{
return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
}
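// Editor's sketch (illustrative only): a mask whose sign bit is set in
// element 0 alone loads one i32 and zeroes the rest, a common way to read a
// partial tail of an array without touching memory past its end.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
__example_load_first_only(int const *__p)
{
  __m128i __m = _mm_set_epi32(0, 0, 0, -1); // sign bit set in element 0 only
  return _mm_maskload_epi32(__p, __m);
}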
/// Conditionally stores eight 32-bit integer elements from the 256-bit vector
/// of [8 x i32] in \a __Y to memory \a __X, if the most significant bit of
/// the corresponding element in the mask \a __M is set; otherwise, the
/// memory element is unchanged.
///
/// \code{.operation}
/// FOR i := 0 TO 7
/// j := i*32
/// IF __M[j+31] == 1
/// Store32(__X+(i*4), __Y[j+31:j])
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
///
/// \param __X
/// A pointer to the memory used for storing values.
/// \param __M
/// A 256-bit vector of [8 x i32] containing the mask bits.
/// \param __Y
/// A 256-bit vector of [8 x i32] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
{
__builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
}
/// Conditionally stores four 64-bit integer elements from the 256-bit vector
/// of [4 x i64] in \a __Y to memory \a __X, if the most significant bit of
/// the corresponding element in the mask \a __M is set; otherwise, the
/// memory element is unchanged.
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*64
/// IF __M[j+63] == 1
/// Store64(__X+(i*8), __Y[j+63:j])
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
///
/// \param __X
/// A pointer to the memory used for storing values.
/// \param __M
/// A 256-bit vector of [4 x i64] containing the mask bits.
/// \param __Y
/// A 256-bit vector of [4 x i64] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
{
__builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);
}
/// Conditionally stores four 32-bit integer elements from the 128-bit vector
/// of [4 x i32] in \a __Y to memory \a __X, if the most significant bit of
/// the corresponding element in the mask \a __M is set; otherwise, the
/// memory element is unchanged.
///
/// \code{.operation}
/// FOR i := 0 TO 3
/// j := i*32
/// IF __M[j+31] == 1
/// Store32(__X+(i*4), __Y[j+31:j])
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
///
/// \param __X
/// A pointer to the memory used for storing values.
/// \param __M
/// A 128-bit vector of [4 x i32] containing the mask bits.
/// \param __Y
/// A 128-bit vector of [4 x i32] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
{
__builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
}
/// Conditionally stores two 64-bit integer elements from the 128-bit vector
/// of [2 x i64] in \a __Y to memory \a __X, if the most significant bit of
/// the corresponding element in the mask \a __M is set; otherwise, the
/// memory element is unchanged.
///
/// \code{.operation}
/// FOR i := 0 TO 1
/// j := i*64
/// IF __M[j+63] == 1
/// Store64(__X+(i*8), __Y[j+63:j])
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
///
/// \param __X
/// A pointer to the memory used for storing values.
/// \param __M
/// A 128-bit vector of [2 x i64] containing the mask bits.
/// \param __Y
/// A 128-bit vector of [2 x i64] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
{
  __builtin_ia32_maskstoreq((__v2di *)__X, (__v2di)__M, (__v2di)__Y);
}
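// Editor's sketch (illustrative only): store only the first two elements,
// leaving the remaining destination memory unchanged.
static __inline__ void __DEFAULT_FN_ATTRS128
__example_store_low_pair(int *__p, __m128i __v)
{
  __m128i __m = _mm_set_epi32(0, 0, -1, -1); // sign bits set in elements 0, 1
  _mm_maskstore_epi32(__p, __m, __v);
}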
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
/// left by the number of bits given in the corresponding element of the
/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and
/// returns the result. If the shift count for any element is greater than
/// 31, the result for that element is zero.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLVD instruction.
///
/// \param __X
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __Y
/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sllv_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
}
/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
/// left by the number of bits given in the corresponding element of the
/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and
/// returns the result. If the shift count for any element is greater than
/// 31, the result for that element is zero.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLVD instruction.
///
/// \param __X
/// A 128-bit vector of [4 x i32] to be shifted.
/// \param __Y
/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_sllv_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
}
/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X
/// left by the number of bits given in the corresponding element of the
/// 256-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and
/// returns the result. If the shift count for any element is greater than
/// 63, the result for that element is zero.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLVQ instruction.
///
/// \param __X
/// A 256-bit vector of [4 x i64] to be shifted.
/// \param __Y
/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
/// bits).
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sllv_epi64(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
}
/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X
/// left by the number of bits given in the corresponding element of the
/// 128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and
/// returns the result. If the shift count for any element is greater than
/// 63, the result for that element is zero.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSLLVQ instruction.
///
/// \param __X
/// A 128-bit vector of [2 x i64] to be shifted.
/// \param __Y
/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
/// bits).
/// \returns A 128-bit vector of [2 x i64] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_sllv_epi64(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
/// right by the number of bits given in the corresponding element of the
/// 256-bit vector of [8 x i32] in \a __Y, shifting in sign bits, and
/// returns the result. If the shift count for any element is greater than
/// 31, the result for that element is 0 or -1 according to the sign bit
/// for that element.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRAVD instruction.
///
/// \param __X
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __Y
/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srav_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
}
/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
/// right by the number of bits given in the corresponding element of the
/// 128-bit vector of [4 x i32] in \a __Y, shifting in sign bits, and
/// returns the result. If the shift count for any element is greater than
/// 31, the result for that element is 0 or -1 according to the sign bit
/// for that element.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRAVD instruction.
///
/// \param __X
/// A 128-bit vector of [4 x i32] to be shifted.
/// \param __Y
/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srav_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
}
/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
/// right by the number of bits given in the corresponding element of the
/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and
/// returns the result. If the shift count for any element is greater than
/// 31, the result for that element is zero.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLVD instruction.
///
/// \param __X
/// A 256-bit vector of [8 x i32] to be shifted.
/// \param __Y
/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srlv_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
}
/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
/// right by the number of bits given in the corresponding element of the
/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and
/// returns the result. If the shift count for any element is greater than
/// 31, the result for that element is zero.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLVD instruction.
///
/// \param __X
/// A 128-bit vector of [4 x i32] to be shifted.
/// \param __Y
/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
/// bits).
/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srlv_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
}
/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X
/// right by the number of bits given in the corresponding element of the
/// 256-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and
/// returns the result. If the shift count for any element is greater than
/// 63, the result for that element is zero.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLVQ instruction.
///
/// \param __X
/// A 256-bit vector of [4 x i64] to be shifted.
/// \param __Y
/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
/// bits).
/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srlv_epi64(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
}
/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X
/// right by the number of bits given in the corresponding element of the
/// 128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and
/// returns the result. If the shift count for any element is greater than
/// 63, the result for that element is zero.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VPSRLVQ instruction.
///
/// \param __X
/// A 128-bit vector of [2 x i64] to be shifted.
/// \param __Y
/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
/// bits).
/// \returns A 128-bit vector of [2 x i64] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srlv_epi64(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
}
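// Editor's sketch (illustrative only): the variable-shift forms take a
// per-element count, so each lane can shift by a different amount.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_per_lane_shift(__m256i __v)
{
  __m256i __counts = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
  return _mm256_sllv_epi32(__v, __counts); // element i shifts left by i bits
}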
/// Conditionally gathers two 64-bit floating-point values, either from the
/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled
/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
/// of [2 x double] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*64
/// k := element*32
/// IF mask[j+63] == 0
/// result[j+63:j] := a[j+63:j]
/// ELSE
/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128d _mm_mask_i32gather_pd(__m128d a, const double *m, __m128i i,
/// __m128d mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERDPD instruction.
///
/// \param a
/// A 128-bit vector of [2 x double] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
/// the first two elements are used.
/// \param mask
/// A 128-bit vector of [2 x double] containing the mask. The most
/// significant bit of each element in the mask vector represents the mask
/// bits. If a mask bit is zero, the corresponding value from vector \a a
/// is gathered; otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [2 x double] containing the gathered values.
#define _mm_mask_i32gather_pd(a, m, i, mask, s) \
  ((__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128d)(a), \
(double const *)(m), \
(__v4si)(__m128i)(i), \
(__v2df)(__m128d)(mask), (s)))
/// Conditionally gathers four 64-bit floating-point values, either from the
/// 256-bit vector of [4 x double] in \a a, or from memory \a m using scaled
/// indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector
/// of [4 x double] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*64
/// k := element*32
/// IF mask[j+63] == 0
/// result[j+63:j] := a[j+63:j]
/// ELSE
/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256d _mm256_mask_i32gather_pd(__m256d a, const double *m, __m128i i,
/// __m256d mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERDPD instruction.
///
/// \param a
/// A 256-bit vector of [4 x double] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
/// \param mask
/// A 256-bit vector of [4 x double] containing the mask. The most
/// significant bit of each element in the mask vector represents the mask
/// bits. If a mask bit is zero, the corresponding value from vector \a a
/// is gathered; otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [4 x double] containing the gathered values.
#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \
((__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
(double const *)(m), \
(__v4si)(__m128i)(i), \
(__v4df)(__m256d)(mask), (s)))
/// Conditionally gathers two 64-bit floating-point values, either from the
/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled
/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
/// of [2 x double] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*64
/// k := element*64
/// IF mask[j+63] == 0
/// result[j+63:j] := a[j+63:j]
/// ELSE
/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128d _mm_mask_i64gather_pd(__m128d a, const double *m, __m128i i,
/// __m128d mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERQPD instruction.
///
/// \param a
/// A 128-bit vector of [2 x double] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
/// \param mask
/// A 128-bit vector of [2 x double] containing the mask. The most
/// significant bit of each element in the mask vector represents the mask
/// bits. If a mask bit is zero, the corresponding value from vector \a a
/// is gathered; otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [2 x double] containing the gathered values.
#define _mm_mask_i64gather_pd(a, m, i, mask, s) \
((__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
(double const *)(m), \
(__v2di)(__m128i)(i), \
(__v2df)(__m128d)(mask), (s)))
/// Conditionally gathers four 64-bit floating-point values, either from the
/// 256-bit vector of [4 x double] in \a a, or from memory \a m using scaled
/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector
/// of [4 x double] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*64
/// k := element*64
/// IF mask[j+63] == 0
/// result[j+63:j] := a[j+63:j]
/// ELSE
/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256d _mm256_mask_i64gather_pd(__m256d a, const double *m, __m256i i,
/// __m256d mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERQPD instruction.
///
/// \param a
/// A 256-bit vector of [4 x double] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
/// \param mask
/// A 256-bit vector of [4 x double] containing the mask. The most
/// significant bit of each element in the mask vector represents the mask
/// bits. If a mask bit is zero, the corresponding value from vector \a a
/// is gathered; otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [4 x double] containing the gathered values.
#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \
((__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
(double const *)(m), \
(__v4di)(__m256i)(i), \
(__v4df)(__m256d)(mask), (s)))
/// Conditionally gathers four 32-bit floating-point values, either from the
/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
/// of [4 x float] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*32
/// k := element*32
/// IF mask[j+31] == 0
/// result[j+31:j] := a[j+31:j]
/// ELSE
/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128 _mm_mask_i32gather_ps(__m128 a, const float *m, __m128i i,
/// __m128 mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERDPS instruction.
///
/// \param a
/// A 128-bit vector of [4 x float] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
/// \param mask
/// A 128-bit vector of [4 x float] containing the mask. The most
/// significant bit of each element in the mask vector represents the mask
/// bits. If a mask bit is zero, the corresponding value from vector \a a
/// is gathered; otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm_mask_i32gather_ps(a, m, i, mask, s) \
((__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
(float const *)(m), \
(__v4si)(__m128i)(i), \
(__v4sf)(__m128)(mask), (s)))
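// Editor's sketch (illustrative only): an all-ones mask gathers every
// element, so result element k is loaded from __base[__idx[k]] (scale 4
// matches sizeof(float)).
static __inline__ __m128 __DEFAULT_FN_ATTRS128
__example_gather_ps(float const *__base, __m128i __idx)
{
  __m128 __mask = _mm_castsi128_ps(_mm_set1_epi32(-1)); // gather all lanes
  return _mm_mask_i32gather_ps(_mm_setzero_ps(), __base, __idx, __mask, 4);
}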
/// Conditionally gathers eight 32-bit floating-point values, either from the
/// 256-bit vector of [8 x float] in \a a, or from memory \a m using scaled
/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector
/// of [8 x float] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 7
/// j := element*32
/// k := element*32
/// IF mask[j+31] == 0
/// result[j+31:j] := a[j+31:j]
/// ELSE
/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256 _mm256_mask_i32gather_ps(__m256 a, const float *m, __m256i i,
/// __m256 mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERDPS instruction.
///
/// \param a
/// A 256-bit vector of [8 x float] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
/// \param mask
/// A 256-bit vector of [8 x float] containing the mask. The most
/// significant bit of each element in the mask vector represents the mask
/// bits. If a mask bit is zero, the corresponding value from vector \a a
/// is gathered; otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [8 x float] containing the gathered values.
#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \
((__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
(float const *)(m), \
(__v8si)(__m256i)(i), \
(__v8sf)(__m256)(mask), (s)))
/// Conditionally gathers two 32-bit floating-point values, either from the
/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
/// of [4 x float] in \a mask determines the source for the lower two
/// elements. The upper two elements of the result are zeroed.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*32
/// k := element*64
/// IF mask[j+31] == 0
/// result[j+31:j] := a[j+31:j]
/// ELSE
/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
/// FI
/// ENDFOR
/// result[127:64] := 0
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128 _mm_mask_i64gather_ps(__m128 a, const float *m, __m128i i,
/// __m128 mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERQPS instruction.
///
/// \param a
/// A 128-bit vector of [4 x float] used as the source when a mask bit is
/// zero. Only the first two elements are used.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
/// \param mask
/// A 128-bit vector of [4 x float] containing the mask. The most
/// significant bit of each element in the mask vector represents the mask
/// bits. If a mask bit is zero, the corresponding value from vector \a a
/// is gathered; otherwise the value is loaded from memory. Only the first
/// two elements are used.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm_mask_i64gather_ps(a, m, i, mask, s) \
((__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
(float const *)(m), \
(__v2di)(__m128i)(i), \
(__v4sf)(__m128)(mask), (s)))
/// Conditionally gathers four 32-bit floating-point values, either from the
/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector
/// of [4 x float] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*32
/// k := element*64
/// IF mask[j+31] == 0
/// result[j+31:j] := a[j+31:j]
/// ELSE
/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128 _mm256_mask_i64gather_ps(__m128 a, const float *m, __m256i i,
/// __m128 mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERQPS instruction.
///
/// \param a
/// A 128-bit vector of [4 x float] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
/// \param mask
/// A 128-bit vector of [4 x float] containing the mask. The most
/// significant bit of each element in the mask vector represents the mask
/// bits. If a mask bit is zero, the corresponding value from vector \a a
/// is gathered; otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \
((__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
(float const *)(m), \
(__v4di)(__m256i)(i), \
(__v4sf)(__m128)(mask), (s)))
/// Conditionally gathers four 32-bit integer values, either from the
/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
/// of [4 x i32] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*32
/// k := element*32
/// IF mask[j+31] == 0
/// result[j+31:j] := a[j+31:j]
/// ELSE
/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_mask_i32gather_epi32(__m128i a, const int *m, __m128i i,
/// __m128i mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERDD instruction.
///
/// \param a
/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
/// \param mask
/// A 128-bit vector of [4 x i32] containing the mask. The most significant
/// bit of each element in the mask vector represents the mask bits. If a
/// mask bit is zero, the corresponding value from vector \a a is gathered;
/// otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \
((__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
(int const *)(m), \
(__v4si)(__m128i)(i), \
(__v4si)(__m128i)(mask), (s)))
/// Conditionally gathers eight 32-bit integer values, either from the
/// 256-bit vector of [8 x i32] in \a a, or from memory \a m using scaled
/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector
/// of [8 x i32] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 7
/// j := element*32
/// k := element*32
/// IF mask[j+31] == 0
/// result[j+31:j] := a[j+31:j]
/// ELSE
/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_mask_i32gather_epi32(__m256i a, const int *m, __m256i i,
/// __m256i mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERDD instruction.
///
/// \param a
/// A 256-bit vector of [8 x i32] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
/// \param mask
/// A 256-bit vector of [8 x i32] containing the mask. The most significant
/// bit of each element in the mask vector represents the mask bits. If a
/// mask bit is zero, the corresponding value from vector \a a is gathered;
/// otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [8 x i32] containing the gathered values.
#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \
((__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
(int const *)(m), \
(__v8si)(__m256i)(i), \
(__v8si)(__m256i)(mask), (s)))
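// Editor's sketch (illustrative only): gather eight ints through an index
// vector, keeping the corresponding element of __src wherever the sign bit
// of __mask is clear.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_gather_epi32(int const *__base, __m256i __idx, __m256i __mask,
                       __m256i __src)
{
  return _mm256_mask_i32gather_epi32(__src, __base, __idx, __mask, 4);
}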
/// Conditionally gathers two 32-bit integer values, either from the
/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
/// of [4 x i32] in \a mask determines the source for the lower two
/// elements. The upper two elements of the result are zeroed.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*32
/// k := element*64
/// IF mask[j+31] == 0
/// result[j+31:j] := a[j+31:j]
/// ELSE
/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
/// FI
/// ENDFOR
/// result[127:64] := 0
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_mask_i64gather_epi32(__m128i a, const int *m, __m128i i,
/// __m128i mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERQD instruction.
///
/// \param a
/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
/// zero. Only the first two elements are used.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
/// \param mask
/// A 128-bit vector of [4 x i32] containing the mask. The most significant
/// bit of each element in the mask vector represents the mask bits. If a
/// mask bit is zero, the corresponding value from vector \a a is gathered;
/// otherwise the value is loaded from memory. Only the first two elements
/// are used.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \
((__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
(int const *)(m), \
(__v2di)(__m128i)(i), \
(__v4si)(__m128i)(mask), (s)))
/// Conditionally gathers four 32-bit integer values, either from the
/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector
/// of [4 x i32] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*32
/// k := element*64
/// IF mask[j+31] == 0
/// result[j+31:j] := a[j+31:j]
/// ELSE
/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm256_mask_i64gather_epi32(__m128i a, const int *m, __m256i i,
/// __m128i mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERQD instruction.
///
/// \param a
/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
/// \param mask
/// A 128-bit vector of [4 x i32] containing the mask. The most significant
/// bit of each element in the mask vector represents the mask bits. If a
/// mask bit is zero, the corresponding value from vector \a a is gathered;
/// otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \
((__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
(int const *)(m), \
(__v4di)(__m256i)(i), \
(__v4si)(__m128i)(mask), (s)))
/// Conditionally gathers two 64-bit integer values, either from the
/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled
/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
/// of [2 x i64] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*64
/// k := element*32
/// IF mask[j+63] == 0
/// result[j+63:j] := a[j+63:j]
/// ELSE
/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_mask_i32gather_epi64(__m128i a, const long long *m, __m128i i,
/// __m128i mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
///
/// \param a
/// A 128-bit vector of [2 x i64] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
/// the first two elements are used.
/// \param mask
/// A 128-bit vector of [2 x i64] containing the mask. The most significant
/// bit of each element in the mask vector represents the mask bit. If a
/// mask bit is zero, the corresponding value from vector \a a is used;
/// otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \
((__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
(long long const *)(m), \
(__v4si)(__m128i)(i), \
(__v2di)(__m128i)(mask), (s)))
/// Conditionally gathers four 64-bit integer values, either from the
/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled
/// indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector
/// of [4 x i64] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*64
/// k := element*32
/// IF mask[j+63] == 0
/// result[j+63:j] := a[j+63:j]
/// ELSE
/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_mask_i32gather_epi64(__m256i a, const long long *m,
/// __m128i i, __m256i mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
///
/// \param a
/// A 256-bit vector of [4 x i64] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
/// \param mask
/// A 256-bit vector of [4 x i64] containing the mask. The most significant
/// bit of each element in the mask vector represents the mask bit. If a
/// mask bit is zero, the corresponding value from vector \a a is used;
/// otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \
((__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
(long long const *)(m), \
(__v4si)(__m128i)(i), \
(__v4di)(__m256i)(mask), (s)))
/// Conditionally gathers two 64-bit integer values, either from the
/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled
/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
/// of [2 x i64] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*64
/// k := element*64
/// IF mask[j+63] == 0
/// result[j+63:j] := a[j+63:j]
/// ELSE
/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_mask_i64gather_epi64(__m128i a, const long long *m, __m128i i,
/// __m128i mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
///
/// \param a
/// A 128-bit vector of [2 x i64] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
/// \param mask
/// A 128-bit vector of [2 x i64] containing the mask. The most significant
/// bit of each element in the mask vector represents the mask bit. If a
/// mask bit is zero, the corresponding value from vector \a a is used;
/// otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \
((__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
(long long const *)(m), \
(__v2di)(__m128i)(i), \
(__v2di)(__m128i)(mask), (s)))
/// Conditionally gathers four 64-bit integer values, either from the
/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled
/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector
/// of [4 x i64] in \a mask determines the source for each element.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*64
/// k := element*64
/// IF mask[j+63] == 0
/// result[j+63:j] := a[j+63:j]
/// ELSE
/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
/// FI
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_mask_i64gather_epi64(__m256i a, const long long *m,
/// __m256i i, __m256i mask, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
///
/// \param a
/// A 256-bit vector of [4 x i64] used as the source when a mask bit is
/// zero.
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
/// \param mask
/// A 256-bit vector of [4 x i64] containing the mask. The most significant
/// bit of each element in the mask vector represents the mask bit. If a
/// mask bit is zero, the corresponding value from vector \a a is used;
/// otherwise the value is loaded from memory.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \
((__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
(long long const *)(m), \
(__v4di)(__m256i)(i), \
(__v4di)(__m256i)(mask), (s)))
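To illustrate the mask selecting per lane between the source vector and memory, a sketch under the same assumptions (AVX2 enabled; values are illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  long long table[8] = {100, 101, 102, 103, 104, 105, 106, 107};
  __m256i idx  = _mm256_set_epi64x(7, 6, 1, 0);   /* lanes 0..3 index 0, 1, 6, 7 */
  __m256i src  = _mm256_set1_epi64x(-42);         /* used where the mask sign bit is clear */
  __m256i mask = _mm256_set_epi64x(0, -1, 0, -1); /* load lanes 0 and 2, keep src in 1 and 3 */
  __m256i r = _mm256_mask_i64gather_epi64(src, table, idx, mask, 8);
  long long out[4];
  _mm256_storeu_si256((__m256i *)out, r);
  printf("%lld %lld %lld %lld\n", out[0], out[1], out[2], out[3]); /* 100 -42 106 -42 */
  return 0;
}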
/// Gathers two 64-bit floating-point values from memory \a m using scaled
/// indexes from the 128-bit vector of [4 x i32] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*64
/// k := element*32
/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128d _mm_i32gather_pd(const double *m, __m128i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERDPD instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
/// the first two elements are used.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [2 x double] containing the gathered values.
#define _mm_i32gather_pd(m, i, s) \
((__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
(double const *)(m), \
@@ -1039,6 +4744,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_mm_setzero_pd()), \
(s)))
/// Gathers four 64-bit floating-point values from memory \a m using scaled
/// indexes from the 128-bit vector of [4 x i32] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*64
/// k := element*32
/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256d _mm256_i32gather_pd(const double *m, __m128i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERDPD instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [4 x double] containing the gathered values.
#define _mm256_i32gather_pd(m, i, s) \
((__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
(double const *)(m), \
@@ -1048,6 +4780,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_CMP_EQ_OQ), \
(s)))
/// Gathers two 64-bit floating-point values from memory \a m using scaled
/// indexes from the 128-bit vector of [2 x i64] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*64
/// k := element*64
/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128d _mm_i64gather_pd(const double *m, __m128i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERQPD instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [2 x double] containing the gathered values.
#define _mm_i64gather_pd(m, i, s) \
((__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
(double const *)(m), \
@@ -1056,6 +4815,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_mm_setzero_pd()), \
(s)))
/// Gathers four 64-bit floating-point values from memory \a m using scaled
/// indexes from the 256-bit vector of [4 x i64] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*64
/// k := element*64
/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256d _mm256_i64gather_pd(const double *m, __m256i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERQPD instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [4 x double] containing the gathered values.
#define _mm256_i64gather_pd(m, i, s) \
((__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
(double const *)(m), \
@@ -1065,6 +4851,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_CMP_EQ_OQ), \
(s)))
/// Gathers four 32-bit floating-point values from memory \a m using scaled
/// indexes from the 128-bit vector of [4 x i32] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*32
/// k := element*32
/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128 _mm_i32gather_ps(const float *m, __m128i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERDPS instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm_i32gather_ps(m, i, s) \
((__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
(float const *)(m), \
@@ -1073,6 +4886,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_mm_setzero_ps()), \
(s)))
/// Gathers eight 32-bit floating-point values from memory \a m using scaled
/// indexes from the 256-bit vector of [8 x i32] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 7
/// j := element*32
/// k := element*32
/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256 _mm256_i32gather_ps(const float *m, __m256i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERDPS instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [8 x float] containing the gathered values.
#define _mm256_i32gather_ps(m, i, s) \
((__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
(float const *)(m), \
@@ -1082,6 +4922,35 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_CMP_EQ_OQ), \
(s)))
/// Gathers two 32-bit floating-point values from memory \a m using scaled
/// indexes from the 128-bit vector of [2 x i64] in \a i. The upper two
/// elements of the result are zeroed.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*32
/// k := element*64
/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
/// ENDFOR
/// result[127:64] := 0
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128 _mm_i64gather_ps(const float *m, __m128i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERQPS instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm_i64gather_ps(m, i, s) \
((__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
(float const *)(m), \
@@ -1090,6 +4959,33 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_mm_setzero_ps()), \
(s)))
/// Gathers four 32-bit floating-point values from memory \a m using scaled
/// indexes from the 256-bit vector of [4 x i64] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*32
/// k := element*64
/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128 _mm256_i64gather_ps(const float *m, __m256i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VGATHERQPS instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm256_i64gather_ps(m, i, s) \
((__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
(float const *)(m), \
@@ -1098,44 +4994,263 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y)
_mm_setzero_ps()), \
(s)))
/// Gathers four 32-bit integer values from memory \a m using scaled
/// indexes from the 128-bit vector of [4 x i32] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*32
/// k := element*32
/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_i32gather_epi32(const int *m, __m128i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERDD instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm_i32gather_epi32(m, i, s) \
((__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
(int const *)(m), (__v4si)(__m128i)(i), \
(__v4si)_mm_set1_epi32(-1), (s)))
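As the expansion shows, the unmasked forms pass an all-ones mask and an undefined source vector to the same builtin, so every lane is loaded from memory. A sketch (assumes AVX2; data is illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  int table[8] = {0, 10, 20, 30, 40, 50, 60, 70};
  __m128i idx = _mm_set_epi32(6, 4, 2, 0);         /* gather every other element */
  __m128i r = _mm_i32gather_epi32(table, idx, 4);  /* scale 4 = sizeof(int) */
  int out[4];
  _mm_storeu_si128((__m128i *)out, r);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 0 20 40 60 */
  return 0;
}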
/// Gathers eight 32-bit integer values from memory \a m using scaled
/// indexes from the 256-bit vector of [8 x i32] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 7
/// j := element*32
/// k := element*32
/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_i32gather_epi32(const int *m, __m256i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERDD instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [8 x i32] containing the gathered values.
#define _mm256_i32gather_epi32(m, i, s) \
((__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
(int const *)(m), (__v8si)(__m256i)(i), \
(__v8si)_mm256_set1_epi32(-1), (s)))
/// Gathers two 32-bit integer values from memory \a m using scaled indexes
/// from the 128-bit vector of [2 x i64] in \a i. The upper two elements
/// of the result are zeroed.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*32
/// k := element*64
/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
/// ENDFOR
/// result[127:64] := 0
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_i64gather_epi32(const int *m, __m128i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERQD instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm_i64gather_epi32(m, i, s) \
((__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
(int const *)(m), (__v2di)(__m128i)(i), \
(__v4si)_mm_set1_epi32(-1), (s)))
/// Gathers four 32-bit integer values from memory \a m using scaled indexes
/// from the 256-bit vector of [4 x i64] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*32
/// k := element*64
/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm256_i64gather_epi32(const int *m, __m256i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERQD instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm256_i64gather_epi32(m, i, s) \
((__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
(int const *)(m), (__v4di)(__m256i)(i), \
(__v4si)_mm_set1_epi32(-1), (s)))
/// Gathers two 64-bit integer values from memory \a m using scaled indexes
/// from the 128-bit vector of [4 x i32] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*64
/// k := element*32
/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_i32gather_epi64(const long long *m, __m128i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
/// the first two elements are used.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
#define _mm_i32gather_epi64(m, i, s) \
((__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
(long long const *)(m), \
(__v4si)(__m128i)(i), \
(__v2di)_mm_set1_epi64x(-1), (s)))
/// Gathers four 64-bit integer values from memory \a m using scaled indexes
/// from the 128-bit vector of [4 x i32] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*64
/// k := element*32
/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_i32gather_epi64(const long long *m, __m128i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
#define _mm256_i32gather_epi64(m, i, s) \
((__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
(long long const *)(m), \
(__v4si)(__m128i)(i), \
(__v4di)_mm256_set1_epi64x(-1), (s)))
/// Gathers two 64-bit integer values from memory \a m using scaled indexes
/// from the 128-bit vector of [2 x i64] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 1
/// j := element*64
/// k := element*64
/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_i64gather_epi64(const long long *m, __m128i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
#define _mm_i64gather_epi64(m, i, s) \
((__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
(long long const *)(m), \
(__v2di)(__m128i)(i), \
(__v2di)_mm_set1_epi64x(-1), (s)))
/// Gathers four 64-bit integer values from memory \a m using scaled indexes
/// from the 256-bit vector of [4 x i64] in \a i.
///
/// \code{.operation}
/// FOR element := 0 to 3
/// j := element*64
/// k := element*64
/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_i64gather_epi64(const long long *m, __m256i i, const int s);
/// \endcode
///
/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
///
/// \param m
/// A pointer to the memory used for loading values.
/// \param i
/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
/// \param s
/// A literal constant scale factor for the indexes in \a i. Must be
/// 1, 2, 4, or 8.
/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
#define _mm256_i64gather_epi64(m, i, s) \
((__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
(long long const *)(m), \
+17 -7
View File
@@ -397,14 +397,15 @@ _mm512_broadcastsd_pd(__m128d __A)
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd256_pd512(__m256d __a)
{
return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_castps256_ps512(__m256 __a)
{
return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
-1, -1, -1, -1, -1, -1, -1, -1);
return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline __m128d __DEFAULT_FN_ATTRS512
@@ -446,7 +447,10 @@ _mm512_castpd_si512 (__m512d __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd128_pd512 (__m128d __A)
{
return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
__m256d __B = __builtin_nondeterministic_value(__B);
return __builtin_shufflevector(
__builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3),
__B, 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512d __DEFAULT_FN_ATTRS512
@@ -464,19 +468,25 @@ _mm512_castps_si512 (__m512 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_castps128_ps512 (__m128 __A)
{
return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
__m256 __B = __builtin_nondeterministic_value(__B);
return __builtin_shufflevector(
__builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7),
__B, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi128_si512 (__m128i __A)
{
return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
__m256i __B = __builtin_nondeterministic_value(__B);
return __builtin_shufflevector(
__builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3),
__B, 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi256_si512 (__m256i __A)
{
return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
return __builtin_shufflevector( __A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7);
}
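Since these casts now leave the upper bits unspecified rather than routing them through undef shuffle lanes, callers that need defined upper bits must extend explicitly. A minimal sketch assuming AVX-512F (recent Clang also provides _mm512_zextsi256_si512 for exactly this):

#include <immintrin.h>

static inline __m512i zext_si256_si512(__m256i lo) {
  /* write the 256-bit value into the low half of an all-zero 512-bit vector */
  return _mm512_inserti64x4(_mm512_setzero_si512(), lo, 0);
}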
static __inline __m512 __DEFAULT_FN_ATTRS512
+12 -8
View File
@@ -192,22 +192,26 @@ _mm512_castph512_ph256(__m512h __a) {
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_castph128_ph256(__m128h __a) {
return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1,
-1, -1, -1, -1, -1);
return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a),
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512
_mm512_castph128_ph512(__m128h __a) {
return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1);
__m256h __b = __builtin_nondeterministic_value(__b);
return __builtin_shufflevector(
__builtin_shufflevector(__a, __builtin_nondeterministic_value(__a),
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
__b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512
_mm512_castph256_ph512(__m256h __a) {
return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1);
return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31);
}
/// Constructs a 256-bit floating-point vector of [16 x half] from a
+20 -7
View File
@@ -3017,8 +3017,11 @@ _mm256_zeroupper(void)
static __inline __m128 __DEFAULT_FN_ATTRS128
_mm_broadcast_ss(float const *__a)
{
float __f = *__a;
return __extension__ (__m128)(__v4sf){ __f, __f, __f, __f };
struct __mm_broadcast_ss_struct {
float __f;
} __attribute__((__packed__, __may_alias__));
float __f = ((const struct __mm_broadcast_ss_struct*)__a)->__f;
return __extension__ (__m128){ __f, __f, __f, __f };
}
/// Loads a scalar double-precision floating point value from the
@@ -3036,7 +3039,10 @@ _mm_broadcast_ss(float const *__a)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_sd(double const *__a)
{
double __d = *__a;
struct __mm256_broadcast_sd_struct {
double __d;
} __attribute__((__packed__, __may_alias__));
double __d = ((const struct __mm256_broadcast_sd_struct*)__a)->__d;
return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d };
}
@@ -3055,7 +3061,10 @@ _mm256_broadcast_sd(double const *__a)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ss(float const *__a)
{
float __f = *__a;
struct __mm256_broadcast_ss_struct {
float __f;
} __attribute__((__packed__, __may_alias__));
float __f = ((const struct __mm256_broadcast_ss_struct*)__a)->__f;
return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
}
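The packed, may_alias struct makes the scalar load byte-aligned and exempt from strict-aliasing assumptions, so the broadcast can read a float that is not 4-byte aligned. A minimal sketch (assumes AVX enabled; the helper name is illustrative):

#include <immintrin.h>
#include <stddef.h>

static inline __m256 broadcast_float_at(const unsigned char *buf, size_t off) {
  /* the load inside _mm256_broadcast_ss now tolerates any byte offset */
  return _mm256_broadcast_ss((const float *)(buf + off));
}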
@@ -4499,7 +4508,8 @@ _mm256_castsi256_si128(__m256i __a)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castpd128_pd256(__m128d __a)
{
return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);
return __builtin_shufflevector(
(__v2df)__a, (__v2df)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);
}
/// Constructs a 256-bit floating-point vector of [8 x float] from a
@@ -4520,7 +4530,9 @@ _mm256_castpd128_pd256(__m128d __a)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castps128_ps256(__m128 __a)
{
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1);
return __builtin_shufflevector((__v4sf)__a,
(__v4sf)__builtin_nondeterministic_value(__a),
0, 1, 2, 3, 4, 5, 6, 7);
}
/// Constructs a 256-bit integer vector from a 128-bit integer vector.
@@ -4539,7 +4551,8 @@ _mm256_castps128_ps256(__m128 __a)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castsi128_si256(__m128i __a)
{
return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1);
return __builtin_shufflevector(
(__v2di)__a, (__v2di)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);
}
/// Constructs a 256-bit floating-point vector of [4 x double] from a
+473
View File
@@ -0,0 +1,473 @@
/*===----------- avxvnniint16intrin.h - AVXVNNIINT16 intrinsics-------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __IMMINTRIN_H
#error \
"Never use <avxvnniint16intrin.h> directly; include <immintrin.h> instead."
#endif // __IMMINTRIN_H
#ifndef __AVXVNNIINT16INTRIN_H
#define __AVXVNNIINT16INTRIN_H
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS128 \
__attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \
__min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
__attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \
__min_vector_width__(256)))
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
/// signed 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_dpwsud_epi32(__m128i __W, __m128i __A, __m128i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWSUD instruction.
///
/// \param __W
/// A 128-bit vector of [4 x int].
/// \param __A
/// A 128-bit vector of [8 x short].
/// \param __B
/// A 128-bit vector of [8 x unsigned short].
/// \returns
/// A 128-bit vector of [4 x int].
///
/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
/// ENDFOR
/// dst[MAX:128] := 0
/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsud_epi32(__m128i __W,
__m128i __A,
__m128i __B) {
return (__m128i)__builtin_ia32_vpdpwsud128((__v4si)__W, (__v4si)__A,
(__v4si)__B);
}
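A sketch of the accumulate pattern these intrinsics are built for (assumes a compiler and CPU with AVXVNNIINT16 support, e.g. clang -mavxvnniint16; values are illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i acc = _mm_setzero_si128();
  __m128i a = _mm_set_epi16(8, 7, 6, 5, 4, 3, 2, 1); /* signed words 1..8 */
  __m128i b = _mm_set1_epi16(1);                     /* unsigned words, all 1 */
  acc = _mm_dpwsud_epi32(acc, a, b);  /* with b==1: dword j = a.word[2j] + a.word[2j+1] */
  int out[4];
  _mm_storeu_si128((__m128i *)out, acc);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 3 7 11 15 */
  return 0;
}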
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
/// signed 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWSUD instruction.
///
/// \param __W
/// A 256-bit vector of [8 x int].
/// \param __A
/// A 256-bit vector of [16 x short].
/// \param __B
/// A 256-bit vector of [16 x unsigned short].
/// \returns
/// A 256-bit vector of [8 x int].
///
/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
/// ENDFOR
/// dst[MAX:256] := 0
/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_vpdpwsud256((__v8si)__W, (__v8si)__A,
(__v8si)__B);
}
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
/// signed 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W with signed saturation, and store the packed
/// 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_dpwsuds_epi32(__m128i __W, __m128i __A, __m128i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
///
/// \param __W
/// A 128-bit vector of [4 x int].
/// \param __A
/// A 128-bit vector of [8 x short].
/// \param __B
/// A 128-bit vector of [8 x unsigned short].
/// \returns
/// A 128-bit vector of [4 x int].
///
/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// dst[MAX:128] := 0
/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsuds_epi32(__m128i __W,
__m128i __A,
__m128i __B) {
return (__m128i)__builtin_ia32_vpdpwsuds128((__v4si)__W, (__v4si)__A,
(__v4si)__B);
}
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
/// signed 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W with signed saturation, and store the packed
/// 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
///
/// \param __W
/// A 256-bit vector of [8 x int].
/// \param __A
/// A 256-bit vector of [16 x short].
/// \param __B
/// A 256-bit vector of [16 x unsigned short].
/// \returns
/// A 256-bit vector of [8 x int].
///
/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// dst[MAX:256] := 0
/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_vpdpwsuds256((__v8si)__W, (__v8si)__A,
(__v8si)__B);
}
/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
/// signed 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_dpwusd_epi32(__m128i __W, __m128i __A, __m128i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWUSD instruction.
///
/// \param __W
/// A 128-bit vector of [4 x int].
/// \param __A
/// A 128-bit vector of [8 x unsigned short].
/// \param __B
/// A 128-bit vector of [8 x short].
/// \returns
/// A 128-bit vector of [4 x int].
///
/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
/// ENDFOR
/// dst[MAX:128] := 0
/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusd_epi32(__m128i __W,
__m128i __A,
__m128i __B) {
return (__m128i)__builtin_ia32_vpdpwusd128((__v4si)__W, (__v4si)__A,
(__v4si)__B);
}
/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
/// signed 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWUSD instruction.
///
/// \param __W
/// A 256-bit vector of [8 x int].
/// \param __A
/// A 256-bit vector of [16 x unsigned short].
/// \param __B
/// A 256-bit vector of [16 x short].
/// \returns
/// A 256-bit vector of [8 x int].
///
/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
/// ENDFOR
/// dst[MAX:256] := 0
/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_vpdpwusd256((__v8si)__W, (__v8si)__A,
(__v8si)__B);
}
/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
/// signed 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W with signed saturation, and store the packed
/// 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_dpwusds_epi32(__m128i __W, __m128i __A, __m128i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWUSDS instruction.
///
/// \param __W
/// A 128-bit vector of [4 x int].
/// \param __A
/// A 128-bit vector of [8 x unsigned short].
/// \param __B
/// A 128-bit vector of [8 x short].
/// \returns
/// A 128-bit vector of [4 x int].
///
/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// dst[MAX:128] := 0
/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusds_epi32(__m128i __W,
__m128i __A,
__m128i __B) {
return (__m128i)__builtin_ia32_vpdpwusds128((__v4si)__W, (__v4si)__A,
(__v4si)__B);
}
/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
/// signed 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W with signed saturation, and store the packed
/// 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_dpwusds_epi32(__m256i __W, __m256i __A, __m256i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWUSDS instruction.
///
/// \param __W
/// A 256-bit vector of [8 x int].
/// \param __A
/// A 256-bit vector of [16 x unsigned short].
/// \param __B
/// A 256-bit vector of [16 x short].
/// \returns
/// A 256-bit vector of [8 x int].
///
/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// dst[MAX:256] := 0
/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpwusds_epi32(__m256i __W, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_vpdpwusds256((__v8si)__W, (__v8si)__A,
(__v8si)__B);
}
/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
/// unsigned 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_dpwuud_epi32(__m128i __W, __m128i __A, __m128i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWUUD instruction.
///
/// \param __W
/// A 128-bit vector of [4 x unsigned int].
/// \param __A
/// A 128-bit vector of [8 x unsigned short].
/// \param __B
/// A 128-bit vector of [8 x unsigned short].
/// \returns
/// A 128-bit vector of [4 x unsigned int].
///
/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
/// ENDFOR
/// dst[MAX:128] := 0
/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuud_epi32(__m128i __W,
__m128i __A,
__m128i __B) {
return (__m128i)__builtin_ia32_vpdpwuud128((__v4si)__W, (__v4si)__A,
(__v4si)__B);
}
/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
/// unsigned 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWUUD instruction.
///
/// \param __W
/// A 256-bit vector of [8 x unsigned int].
/// \param __A
/// A 256-bit vector of [16 x unsigned short].
/// \param __B
/// A 256-bit vector of [16 x unsigned short].
/// \returns
/// A 256-bit vector of [8 x unsigned int].
///
/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
/// ENDFOR
/// dst[MAX:256] := 0
/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_vpdpwuud256((__v8si)__W, (__v8si)__A,
(__v8si)__B);
}
/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
/// unsigned 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W with unsigned saturation, and store the packed
/// 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_dpwuuds_epi32(__m128i __W, __m128i __A, __m128i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWUUDS instruction.
///
/// \param __W
/// A 128-bit vector of [4 x unsigned int].
/// \param __A
/// A 128-bit vector of [8 x unsigned short].
/// \param __B
/// A 128-bit vector of [8 x unsigned short].
/// \returns
/// A 128-bit vector of [4 x unsigned int].
///
/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// dst[MAX:128] := 0
/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuuds_epi32(__m128i __W,
__m128i __A,
__m128i __B) {
return (__m128i)__builtin_ia32_vpdpwuuds128((__v4si)__W, (__v4si)__A,
(__v4si)__B);
}
/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
/// unsigned 32-bit results. Sum these 2 results with the corresponding
/// 32-bit integer in \a __W with unsigned saturation, and store the packed
/// 32-bit results in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VPDPWUUDS instruction.
///
/// \param __W
/// A 256-bit vector of [8 x unsigned int].
/// \param __A
/// A 256-bit vector of [16 x unsigned short].
/// \param __B
/// A 256-bit vector of [16 x unsigned short].
/// \returns
/// A 256-bit vector of [8 x unsigned int].
///
/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// dst[MAX:256] := 0
/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_vpdpwuuds256((__v8si)__W, (__v8si)__A,
(__v8si)__B);
}
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
#endif // __AVXVNNIINT16INTRIN_H
+187 -13
View File
@@ -7,8 +7,8 @@
*===-----------------------------------------------------------------------===
*/
#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
#ifndef __IMMINTRIN_H
#error "Never use <bmi2intrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __BMI2INTRIN_H
@@ -17,44 +17,228 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2")))
/// Copies the unsigned 32-bit integer \a __X and zeroes the upper bits
/// starting at bit number \a __Y.
///
/// \code{.operation}
/// i := __Y[7:0]
/// result := __X
/// IF i < 32
/// result[31:i] := 0
/// FI
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c BZHI instruction.
///
/// \param __X
/// The 32-bit source value to copy.
/// \param __Y
/// The lower 8 bits specify the bit number of the lowest bit to zero.
/// \returns The partially zeroed 32-bit value.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_bzhi_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_bzhi_si(__X, __Y);
}
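For example (assumes BMI2, e.g. clang -mbmi2), keeping only the low 12 bits of a value:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  unsigned int x = 0xFFFFFFFFu;
  printf("0x%08X\n", _bzhi_u32(x, 12)); /* bits 31:12 zeroed -> 0x00000FFF */
  return 0;
}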
/// Deposits (scatters) low-order bits from the unsigned 32-bit integer \a __X
/// into the 32-bit result, according to the mask in the unsigned 32-bit
/// integer \a __Y. All other bits of the result are zero.
///
/// \code{.operation}
/// i := 0
/// result := 0
/// FOR m := 0 TO 31
/// IF __Y[m] == 1
/// result[m] := __X[i]
/// i := i + 1
/// ENDIF
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c PDEP instruction.
///
/// \param __X
/// The 32-bit source value to copy.
/// \param __Y
/// The 32-bit mask specifying where to deposit source bits.
/// \returns The 32-bit result.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_pdep_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_pdep_si(__X, __Y);
}
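A classic PDEP application is Morton (Z-order) encoding: scatter one coordinate's bits to the even positions and the other's to the odd positions. A sketch assuming BMI2 (the helper name is illustrative):

#include <immintrin.h>
#include <stdio.h>

static unsigned int morton2d(unsigned int x, unsigned int y) {
  /* low 16 bits of x go to even bit positions, low 16 bits of y to odd ones */
  return _pdep_u32(x, 0x55555555u) | _pdep_u32(y, 0xAAAAAAAAu);
}

int main(void) {
  printf("0x%08X\n", morton2d(0xFFFFu, 0)); /* 0x55555555 */
  return 0;
}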
/// Extracts (gathers) bits from the unsigned 32-bit integer \a __X into the
/// low-order bits of the 32-bit result, according to the mask in the
/// unsigned 32-bit integer \a __Y. All other bits of the result are zero.
///
/// \code{.operation}
/// i := 0
/// result := 0
/// FOR m := 0 TO 31
/// IF __Y[m] == 1
/// result[i] := __X[m]
/// i := i + 1
/// ENDIF
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c PEXT instruction.
///
/// \param __X
/// The 32-bit source value to copy.
/// \param __Y
/// The 32-bit mask specifying which source bits to extract.
/// \returns The 32-bit result.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_pext_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_pext_si(__X, __Y);
}
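PEXT is the inverse direction: it compresses scattered flag bits down to the low-order bits. A sketch assuming BMI2:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  unsigned int flags = (1u << 4) | (1u << 17);             /* bits 4 and 17 set */
  unsigned int mask  = (1u << 4) | (1u << 9) | (1u << 17); /* fields of interest */
  printf("%u\n", _pext_u32(flags, mask)); /* 0b101 = 5 */
  return 0;
}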
/// Multiplies the unsigned 32-bit integers \a __X and \a __Y to form a
/// 64-bit product. Stores the upper 32 bits of the product in the
/// memory addressed by \a __P and returns the lower 32 bits.
///
/// \code{.operation}
/// Store32(__P, (__X * __Y)[63:32])
/// result := (__X * __Y)[31:0]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c MULX instruction.
///
/// \param __X
/// An unsigned 32-bit multiplicand.
/// \param __Y
/// An unsigned 32-bit multiplicand.
/// \param __P
/// A pointer to memory for storing the upper half of the product.
/// \returns The lower half of the product.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_mulx_u32(unsigned int __X, unsigned int __Y, unsigned int *__P)
{
unsigned long long __res = (unsigned long long) __X * __Y;
*__P = (unsigned int)(__res >> 32);
return (unsigned int)__res;
}
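For example (assumes BMI2), a full 32 x 32 -> 64-bit multiply split into halves:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  unsigned int hi;
  unsigned int lo = _mulx_u32(0x80000000u, 4, &hi); /* 2^31 * 4 = 2^33 */
  printf("hi=%u lo=%u\n", hi, lo);                  /* hi=2 lo=0 */
  return 0;
}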
#ifdef __x86_64__
/// Copies the unsigned 64-bit integer \a __X and zeroes the upper bits
/// starting at bit number \a __Y.
///
/// \code{.operation}
/// i := __Y[7:0]
/// result := __X
/// IF i < 64
/// result[63:i] := 0
/// FI
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c BZHI instruction.
///
/// \param __X
/// The 64-bit source value to copy.
/// \param __Y
/// The lower 8 bits specify the bit number of the lowest bit to zero.
/// \returns The partially zeroed 64-bit value.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_bzhi_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_bzhi_di(__X, __Y);
}
/// Deposits (scatters) low-order bits from the unsigned 64-bit integer \a __X
/// into the 64-bit result, according to the mask in the unsigned 64-bit
/// integer \a __Y. All other bits of the result are zero.
///
/// \code{.operation}
/// i := 0
/// result := 0
/// FOR m := 0 TO 63
/// IF __Y[m] == 1
/// result[m] := __X[i]
/// i := i + 1
/// ENDIF
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c PDEP instruction.
///
/// \param __X
/// The 64-bit source value to copy.
/// \param __Y
/// The 64-bit mask specifying where to deposit source bits.
/// \returns The 64-bit result.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_pdep_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_pdep_di(__X, __Y);
}
/// Extracts (gathers) bits from the unsigned 64-bit integer \a __X into the
/// low-order bits of the 64-bit result, according to the mask in the
/// unsigned 64-bit integer \a __Y. All other bits of the result are zero.
///
/// \code{.operation}
/// i := 0
/// result := 0
/// FOR m := 0 TO 63
/// IF __Y[m] == 1
/// result[i] := __X[m]
/// i := i + 1
/// ENDIF
/// ENDFOR
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c PEXT instruction.
///
/// \param __X
/// The 64-bit source value to copy.
/// \param __Y
/// The 64-bit mask specifying which source bits to extract.
/// \returns The 64-bit result.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_pext_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_pext_di(__X, __Y);
}
/// Multiplies the unsigned 64-bit integers \a __X and \a __Y to form a
/// 128-bit product. Stores the upper 64 bits of the product to the
/// memory addressed by \a __P and returns the lower 64 bits.
///
/// \code{.operation}
/// Store64(__P, (__X * __Y)[127:64])
/// result := (__X * __Y)[63:0]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c MULX instruction.
///
/// \param __X
/// An unsigned 64-bit multiplicand.
/// \param __Y
/// An unsigned 64-bit multiplicand.
/// \param __P
/// A pointer to memory for storing the upper half of the product.
/// \returns The lower half of the product.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_mulx_u64 (unsigned long long __X, unsigned long long __Y,
unsigned long long *__P)
@@ -64,17 +248,7 @@ _mulx_u64 (unsigned long long __X, unsigned long long __Y,
return (unsigned long long) __res;
}
#else /* !__x86_64__ */
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P)
{
unsigned long long __res = (unsigned long long) __X * __Y;
*__P = (unsigned int) (__res >> 32);
return (unsigned int) __res;
}
#endif /* !__x86_64__ */
#endif /* __x86_64__ */
#undef __DEFAULT_FN_ATTRS
+9
View File
@@ -17,6 +17,15 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clflushopt")))
/// Invalidates all levels of the cache hierarchy and flushes modified data to
/// memory for the cache line specified by the address \a __m.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c CLFLUSHOPT instruction.
///
/// \param __m
/// An address within the cache line to flush and invalidate.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_clflushopt(void const * __m) {
__builtin_ia32_clflushopt(__m);
+7 -5
View File
@@ -6,7 +6,7 @@
*
*===-----------------------------------------------------------------------===
*/
#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
#ifndef __X86INTRIN_H
#error "Never use <clzerointrin.h> directly; include <x86intrin.h> instead."
#endif
@@ -17,14 +17,16 @@
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("clzero")))
/// Loads the cache line address and zero's out the cacheline
/// Zeroes out the cache line for the address \a __line. This uses a
/// non-temporal store. Calling \c _mm_sfence() afterward might be needed
/// to enforce ordering.
///
/// \headerfile <clzerointrin.h>
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> CLZERO </c> instruction.
/// This intrinsic corresponds to the \c CLZERO instruction.
///
/// \param __line
/// A pointer to a cacheline which needs to be zeroed out.
/// An address within the cache line to zero out.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_clzero (void * __line)
{
+9
View File
@@ -0,0 +1,9 @@
// CUDA headers define __noinline__ which interferes with libstdc++'s use of
// `__attribute((__noinline__))`. In order to avoid a compilation error,
// temporarily unset __noinline__ when we include the affected libstdc++ header.
#pragma push_macro("__noinline__")
#undef __noinline__
#include_next "bits/shared_ptr_base.h"
#pragma pop_macro("__noinline__")
+564
View File
@@ -18,192 +18,756 @@
#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(256)))
/// Computes a multiply-add of 128-bit vectors of [4 x float].
/// For each element, computes <c> (__A * __B) + __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMADD213PS instruction.
///
/// \param __A
/// A 128-bit vector of [4 x float] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [4 x float] containing the multiplier.
/// \param __C
/// A 128-bit vector of [4 x float] containing the addend.
/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
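A minimal sketch of the element-wise (a * b) + c pattern (assumes FMA support, e.g. clang -mfma; values are illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128 a = _mm_set1_ps(2.0f);
  __m128 b = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
  __m128 c = _mm_set1_ps(1.0f);
  float out[4];
  _mm_storeu_ps(out, _mm_fmadd_ps(a, b, c)); /* one rounding per element */
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 3 5 7 9 */
  return 0;
}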
/// Computes a multiply-add of 128-bit vectors of [2 x double].
/// For each element, computes <c> (__A * __B) + __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMADD213PD instruction.
///
/// \param __A
/// A 128-bit vector of [2 x double] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [2 x double] containing the multiplier.
/// \param __C
/// A 128-bit vector of [2 x double] containing the addend.
/// \returns A 128-bit [2 x double] vector containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
/// Computes a scalar multiply-add of the single-precision values in the
/// low 32 bits of 128-bit vectors of [4 x float].
/// \code
/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMADD213SS instruction.
///
/// \param __A
/// A 128-bit vector of [4 x float] containing the multiplicand in the low
/// 32 bits.
/// \param __B
/// A 128-bit vector of [4 x float] containing the multiplier in the low
/// 32 bits.
/// \param __C
/// A 128-bit vector of [4 x float] containing the addend in the low
/// 32 bits.
/// \returns A 128-bit vector of [4 x float] containing the result in the low
/// 32 bits and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
/// Computes a scalar multiply-add of the double-precision values in the
/// low 64 bits of 128-bit vectors of [2 x double].
/// \code
/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMADD213SD instruction.
///
/// \param __A
/// A 128-bit vector of [2 x double] containing the multiplicand in the low
/// 64 bits.
/// \param __B
/// A 128-bit vector of [2 x double] containing the multiplier in the low
/// 64 bits.
/// \param __C
/// A 128-bit vector of [2 x double] containing the addend in the low
/// 64 bits.
/// \returns A 128-bit vector of [2 x double] containing the result in the low
/// 64 bits and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
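// Hedged sketch (editorial, not part of this header): computing a scalar
// double FMA through the vector type; only the low lane participates and
// the result is extracted back to a plain double.
static double example_fma_f64(double x, double y, double z)
{
  __m128d a = _mm_set_sd(x); // {x, 0.0}
  __m128d b = _mm_set_sd(y);
  __m128d c = _mm_set_sd(z);
  return _mm_cvtsd_f64(_mm_fmadd_sd(a, b, c)); // x*y + z, one rounding
}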
/// Computes a multiply-subtract of 128-bit vectors of [4 x float].
/// For each element, computes <c> (__A * __B) - __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMSUB213PS instruction.
///
/// \param __A
/// A 128-bit vector of [4 x float] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [4 x float] containing the multiplier.
/// \param __C
/// A 128-bit vector of [4 x float] containing the subtrahend.
/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
/// Computes a multiply-subtract of 128-bit vectors of [2 x double].
/// For each element, computes <c> (__A * __B) - __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMSUB213PD instruction.
///
/// \param __A
/// A 128-bit vector of [2 x double] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [2 x double] containing the multiplier.
/// \param __C
/// A 128-bit vector of [2 x double] containing the subtrahend.
/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
/// Computes a scalar multiply-subtract of the single-precision values in
/// the low 32 bits of 128-bit vectors of [4 x float].
/// \code
/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMSUB213SS instruction.
///
/// \param __A
/// A 128-bit vector of [4 x float] containing the multiplicand in the low
/// 32 bits.
/// \param __B
/// A 128-bit vector of [4 x float] containing the multiplier in the low
/// 32 bits.
/// \param __C
/// A 128-bit vector of [4 x float] containing the subtrahend in the low
/// 32 bits.
/// \returns A 128-bit vector of [4 x float] containing the result in the low
/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
/// Computes a scalar multiply-subtract of the double-precision values in
/// the low 64 bits of 128-bit vectors of [2 x double].
/// \code
/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMSUB213SD instruction.
///
/// \param __A
/// A 128-bit vector of [2 x double] containing the multiplicand in the low
/// 64 bits.
/// \param __B
/// A 128-bit vector of [2 x double] containing the multiplier in the low
/// 64 bits.
/// \param __C
/// A 128-bit vector of [2 x double] containing the subtrahend in the low
/// 64 bits.
/// \returns A 128-bit vector of [2 x double] containing the result in the low
/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
/// Computes a negated multiply-add of 128-bit vectors of [4 x float].
/// For each element, computes <c> -(__A * __B) + __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMADD213PS instruction.
///
/// \param __A
/// A 128-bit vector of [4 x float] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [4 x float] containing the multiplier.
/// \param __C
/// A 128-bit vector of [4 x float] containing the addend.
/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
/// Computes a negated multiply-add of 128-bit vectors of [2 x double].
/// For each element, computes <c> -(__A * __B) + __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMADD213PD instruction.
///
/// \param __A
/// A 128-bit vector of [2 x double] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [2 x double] containing the multiplier.
/// \param __C
/// A 128-bit vector of [2 x double] containing the addend.
/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
}
/// Computes a scalar negated multiply-add of the single-precision values in
/// the low 32 bits of 128-bit vectors of [4 x float].
/// \code
/// result[31:0] = -(__A[31:0] * __B[31:0]) + __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMADD213SS instruction.
///
/// \param __A
/// A 128-bit vector of [4 x float] containing the multiplicand in the low
/// 32 bits.
/// \param __B
/// A 128-bit vector of [4 x float] containing the multiplier in the low
/// 32 bits.
/// \param __C
/// A 128-bit vector of [4 x float] containing the addend in the low
/// 32 bits.
/// \returns A 128-bit vector of [4 x float] containing the result in the low
/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C);
}
/// Computes a scalar negated multiply-add of the double-precision values
/// in the low 64 bits of 128-bit vectors of [2 x double].
/// \code
/// result[63:0] = -(__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMADD213SD instruction.
///
/// \param __A
/// A 128-bit vector of [2 x double] containing the multiplicand in the low
/// 64 bits.
/// \param __B
/// A 128-bit vector of [2 x double] containing the multiplier in the low
/// 64 bits.
/// \param __C
/// A 128-bit vector of [2 x double] containing the addend in the low
/// 64 bits.
/// \returns A 128-bit vector of [2 x double] containing the result in the low
/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C);
}
/// Computes a negated multiply-subtract of 128-bit vectors of [4 x float].
/// For each element, computes <c> -(__A * __B) - __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMSUB213PS instruction.
///
/// \param __A
/// A 128-bit vector of [4 x float] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [4 x float] containing the multiplier.
/// \param __C
/// A 128-bit vector of [4 x float] containing the subtrahend.
/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
/// Computes a negated multiply-subtract of 128-bit vectors of [2 x double].
/// For each element, computes <c> -(__A * __B) - __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMSUB213PD instruction.
///
/// \param __A
/// A 128-bit vector of [2 x double] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [2 x double] containing the multiplier.
/// \param __C
/// A 128-bit vector of [2 x double] containing the subtrahend.
/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
/// Computes a scalar negated multiply-subtract of the single-precision
/// values in the low 32 bits of 128-bit vectors of [4 x float].
/// \code
/// result[31:0] = -(__A[31:0] * __B[31:0]) - __C[31:0]
/// result[127:32] = __A[127:32]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMSUB213SS instruction.
///
/// \param __A
/// A 128-bit vector of [4 x float] containing the multiplicand in the low
/// 32 bits.
/// \param __B
/// A 128-bit vector of [4 x float] containing the multiplier in the low
/// 32 bits.
/// \param __C
/// A 128-bit vector of [4 x float] containing the subtrahend in the low
/// 32 bits.
/// \returns A 128-bit vector of [4 x float] containing the result in the low
/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C);
}
/// Computes a scalar negated multiply-subtract of the double-precision
/// values in the low 64 bits of 128-bit vectors of [2 x double].
/// \code
/// result[63:0] = -(__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = __A[127:64]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMSUB213SD instruction.
///
/// \param __A
/// A 128-bit vector of [2 x double] containing the multiplicand in the low
/// 64 bits.
/// \param __B
/// A 128-bit vector of [2 x double] containing the multiplier in the low
/// 64 bits.
/// \param __C
/// A 128-bit vector of [2 x double] containing the subtrahend in the low
/// 64 bits.
/// \returns A 128-bit vector of [2 x double] containing the result in the low
/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C);
}
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [4 x float].
/// \code
/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]
/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMADDSUB213PS instruction.
///
/// \param __A
/// A 128-bit vector of [4 x float] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [4 x float] containing the multiplier.
/// \param __C
/// A 128-bit vector of [4 x float] containing the addend/subtrahend.
/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
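// Hedged sketch (editorial, not part of this header): the alternating
// subtract/add pattern maps naturally onto interleaved complex
// multiplication of {re0,im0,re1,im1} data; needs SSE3 (for the dup
// moves) in addition to FMA.
static __m128 example_cmul_ps(__m128 a, __m128 b)
{
  __m128 b_re = _mm_moveldup_ps(b);          // {br0,br0,br1,br1}
  __m128 b_im = _mm_movehdup_ps(b);          // {bi0,bi0,bi1,bi1}
  __m128 a_sw = _mm_shuffle_ps(a, a, 0xB1);  // swap re/im within each pair
  // even lanes: re_a*re_b - im_a*im_b; odd lanes: im_a*re_b + re_a*im_b
  return _mm_fmaddsub_ps(a, b_re, _mm_mul_ps(a_sw, b_im));
}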
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [2 x double].
/// \code
/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction.
///
/// \param __A
/// A 128-bit vector of [2 x double] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [2 x double] containing the multiplier.
/// \param __C
/// A 128-bit vector of [2 x double] containing the addend/subtrahend.
/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [4 x float].
/// \code
/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]
/// result[127:96] = (__A[127:96] * __B[127:96]) - __C[127:96]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMSUBADD213PS instruction.
///
/// \param __A
/// A 128-bit vector of [4 x float] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [4 x float] containing the multiplier.
/// \param __C
/// A 128-bit vector of [4 x float] containing the addend/subtrahend.
/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
/// Computes a multiply with alternating add/subtract of 128-bit vectors of
/// [2 x double].
/// \code
/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMSUBADD213PD instruction.
///
/// \param __A
/// A 128-bit vector of [2 x double] containing the multiplicand.
/// \param __B
/// A 128-bit vector of [2 x double] containing the multiplier.
/// \param __C
/// A 128-bit vector of [2 x double] containing the addend/subtrahend.
/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
/// Computes a multiply-add of 256-bit vectors of [8 x float].
/// For each element, computes <c> (__A * __B) + __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMADD213PS instruction.
///
/// \param __A
/// A 256-bit vector of [8 x float] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [8 x float] containing the multiplier.
/// \param __C
/// A 256-bit vector of [8 x float] containing the addend.
/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
/// Computes a multiply-add of 256-bit vectors of [4 x double].
/// For each element, computes <c> (__A * __B) + __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMADD213PD instruction.
///
/// \param __A
/// A 256-bit vector of [4 x double] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [4 x double] containing the multiplier.
/// \param __C
/// A 256-bit vector of [4 x double] containing the addend.
/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
}
/// Computes a multiply-subtract of 256-bit vectors of [8 x float].
/// For each element, computes <c> (__A * __B) - __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMSUB213PS instruction.
///
/// \param __A
/// A 256-bit vector of [8 x float] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [8 x float] containing the multiplier.
/// \param __C
/// A 256-bit vector of [8 x float] containing the subtrahend.
/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
/// Computes a multiply-subtract of 256-bit vectors of [4 x double].
/// For each element, computes <c> (__A * __B) - __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMSUB213PD instruction.
///
/// \param __A
/// A 256-bit vector of [4 x double] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [4 x double] containing the multiplier.
/// \param __C
/// A 256-bit vector of [4 x double] containing the subtrahend.
/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
/// Computes a negated multiply-add of 256-bit vectors of [8 x float].
/// For each element, computes <c> -(__A * __B) + __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMADD213PS instruction.
///
/// \param __A
/// A 256-bit vector of [8 x float] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [8 x float] containing the multiplier.
/// \param __C
/// A 256-bit vector of [8 x float] containing the addend.
/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
/// Computes a negated multiply-add of 256-bit vectors of [4 x double].
/// For each element, computes <c> -(__A * __B) + __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMADD213PD instruction.
///
/// \param __A
/// A 256-bit vector of [4 x double] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [4 x double] containing the multiplier.
/// \param __C
/// A 256-bit vector of [4 x double] containing the addend.
/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
}
/// Computes a negated multiply-subtract of 256-bit vectors of [8 x float].
/// For each element, computes <c> -(__A * __B) - __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMSUB213PS instruction.
///
/// \param __A
/// A 256-bit vector of [8 x float] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [8 x float] containing the multiplier.
/// \param __C
/// A 256-bit vector of [8 x float] containing the subtrahend.
/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
/// Computes a negated multiply-subtract of 256-bit vectors of [4 x double].
/// For each element, computes <c> -(__A * __B) - __C </c>.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFNMSUB213PD instruction.
///
/// \param __A
/// A 256-bit vector of [4 x double] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [4 x double] containing the multiplier.
/// \param __C
/// A 256-bit vector of [4 x double] containing the subtrahend.
/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
/// Computes a multiply with alternating add/subtract of 256-bit vectors of
/// [8 x float].
/// \code
/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]
/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96]
/// result[159:128] = (__A[159:128] * __B[159:128]) - __C[159:128]
/// result[191:160] = (__A[191:160] * __B[191:160]) + __C[191:160]
/// result[223:192] = (__A[223:192] * __B[223:192]) - __C[223:192]
/// result[255:224] = (__A[255:224] * __B[255:224]) + __C[255:224]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMADDSUB213PS instruction.
///
/// \param __A
/// A 256-bit vector of [8 x float] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [8 x float] containing the multiplier.
/// \param __C
/// A 256-bit vector of [8 x float] containing the addend/subtrahend.
/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
/// Computes a multiply with alternating add/subtract of 256-bit vectors of
/// [4 x double].
/// \code
/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]
/// result[191:128] = (__A[191:128] * __B[191:128]) - __C[191:128]
/// result[255:192] = (__A[255:192] * __B[255:192]) + __C[255:192]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction.
///
/// \param __A
/// A 256-bit vector of [4 x double] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [4 x double] containing the multiplier.
/// \param __C
/// A 256-bit vector of [4 x double] containing the addend/subtrahend.
/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
}
/// Computes a vector multiply with alternating add/subtract of 256-bit
/// vectors of [8 x float].
/// \code
/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]
/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]
/// result[127:96] = (__A[127:96] * __B[127:96]) - __C[127:96]
/// result[159:128] = (__A[159:128] * __B[159:128]) + __C[159:128]
/// result[191:160] = (__A[191:160] * __B[191:160]) - __C[191:160]
/// result[223:192] = (__A[223:192] * __B[223:192]) + __C[223:192]
/// result[255:224] = (__A[255:224] * __B[255:224]) - __C[255:224]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMSUBADD213PS instruction.
///
/// \param __A
/// A 256-bit vector of [8 x float] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [8 x float] containing the multiplier.
/// \param __C
/// A 256-bit vector of [8 x float] containing the addend/subtrahend.
/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
/// Computes a vector multiply with alternating add/subtract of 256-bit
/// vectors of [4 x double].
/// \code
/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]
/// result[191:128] = (__A[191:128] * __B[191:128]) + __C[191:128]
/// result[255:192] = (__A[255:192] * __B[255:192]) - __C[255:192]
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c VFMSUBADD213PD instruction.
///
/// \param __A
/// A 256-bit vector of [4 x double] containing the multiplicand.
/// \param __B
/// A 256-bit vector of [4 x double] containing the multiplier.
/// \param __C
/// A 256-bit vector of [4 x double] containing the addend/subtrahend.
/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
+118 -10
View File
@@ -269,6 +269,26 @@
#include <avxneconvertintrin.h>
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__SHA512__)
#include <sha512intrin.h>
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__SM3__)
#include <sm3intrin.h>
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__SM4__)
#include <sm4intrin.h>
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__AVXVNNIINT16__)
#include <avxvnniint16intrin.h>
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__RDPID__)
/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
@@ -284,30 +304,53 @@ _rdpid_u32(void) {
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__RDRND__)
/// Returns a 16-bit hardware-generated random value.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> RDRAND </c> instruction.
///
/// \param __p
/// A pointer to a 16-bit memory location to place the random value.
/// \returns 1 if the value was successfully generated, 0 otherwise.
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand16_step(unsigned short *__p)
{
return (int)__builtin_ia32_rdrand16_step(__p);
}
/// Returns a 32-bit hardware-generated random value.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> RDRAND </c> instruction.
///
/// \param __p
/// A pointer to a 32-bit memory location to place the random value.
/// \returns 1 if the value was successfully generated, 0 otherwise.
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand32_step(unsigned int *__p)
{
return (int)__builtin_ia32_rdrand32_step(__p);
}
/// Returns a 64-bit hardware-generated random value.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> RDRAND </c> instruction.
///
/// \param __p
/// A pointer to a 64-bit memory location to place the random value.
/// \returns 1 if the value was successfully generated, 0 otherwise.
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand64_step(unsigned long long *__p)
{
#ifdef __x86_64__
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand64_step(unsigned long long *__p)
{
return (int)__builtin_ia32_rdrand64_step(__p);
}
#else
// We need to emulate the functionality of 64-bit rdrand with 2 32-bit
// rdrand instructions.
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand64_step(unsigned long long *__p)
{
// We need to emulate the functionality of 64-bit rdrand with 2 32-bit
// rdrand instructions.
unsigned int __lo, __hi;
unsigned int __res_lo = __builtin_ia32_rdrand32_step(&__lo);
unsigned int __res_hi = __builtin_ia32_rdrand32_step(&__hi);
@@ -318,55 +361,115 @@ _rdrand64_step(unsigned long long *__p)
*__p = 0;
return 0;
}
}
#endif
}
#endif /* __RDRND__ */
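// Hedged usage sketch (editorial, not part of this header): RDRAND can
// transiently report failure when the DRNG is not ready, so callers
// conventionally retry a bounded number of times. Build with -mrdrnd.
static int example_rdrand64_retry(unsigned long long *out)
{
  for (int i = 0; i < 10; ++i)
    if (_rdrand64_step(out))
      return 1;
  return 0; // entropy source not ready
}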
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__FSGSBASE__)
#ifdef __x86_64__
/// Reads the FS base register.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> RDFSBASE </c> instruction.
///
/// \returns The lower 32 bits of the FS base register.
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readfsbase_u32(void)
{
return __builtin_ia32_rdfsbase32();
}
/// Reads the FS base register.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> RDFSBASE </c> instruction.
///
/// \returns The contents of the FS base register.
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readfsbase_u64(void)
{
return __builtin_ia32_rdfsbase64();
}
/// Reads the GS base register.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> RDGSBASE </c> instruction.
///
/// \returns The lower 32 bits of the GS base register.
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readgsbase_u32(void)
{
return __builtin_ia32_rdgsbase32();
}
/// Reads the GS base register.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> RDGSBASE </c> instruction.
///
/// \returns The contents of the GS base register.
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readgsbase_u64(void)
{
return __builtin_ia32_rdgsbase64();
}
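// Hedged sketch (editorial, not part of this header): reading the segment
// bases from user space only works where the OS enables user-mode FSGSBASE
// (e.g. Linux >= 5.9 on capable CPUs); build with -mfsgsbase.
static unsigned long long example_fs_base(void)
{
  return _readfsbase_u64(); // on x86-64 Linux, FS typically holds the TLS base
}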
/// Modifies the FS base register.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> WRFSBASE </c> instruction.
///
/// \param __V
/// Value to use for the lower 32 bits of the FS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writefsbase_u32(unsigned int __V)
{
__builtin_ia32_wrfsbase32(__V);
}
/// Modifies the FS base register.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> WRFSBASE </c> instruction.
///
/// \param __V
/// Value to use for the FS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writefsbase_u64(unsigned long long __V)
{
__builtin_ia32_wrfsbase64(__V);
}
/// Modifies the GS base register.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> WRGSBASE </c> instruction.
///
/// \param __V
/// Value to use for the lower 32 bits of the GS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writegsbase_u32(unsigned int __V)
{
__builtin_ia32_wrgsbase32(__V);
}
/// Modifies the GS base register.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> WRGSBASE </c> instruction.
///
/// \param __V
/// Value to use for the GS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writegsbase_u64(unsigned long long __V)
{
@@ -538,6 +641,11 @@ _storebe_i64(void * __P, long long __D) {
#include <amxintrin.h>
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__AMX_COMPLEX__)
#include <amxcomplexintrin.h>
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__AVX512VP2INTERSECT__)
#include <avx512vp2intersectintrin.h>
+5 -1
View File
@@ -52,7 +52,11 @@
#define LONG_MIN (-__LONG_MAX__ -1L)
#define UCHAR_MAX (__SCHAR_MAX__*2 +1)
#define USHRT_MAX (__SHRT_MAX__ *2 +1)
#if __SHRT_WIDTH__ < __INT_WIDTH__
#define USHRT_MAX (__SHRT_MAX__ * 2 + 1)
#else
#define USHRT_MAX (__SHRT_MAX__ * 2U + 1U)
#endif
#define UINT_MAX (__INT_MAX__ *2U +1U)
#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
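/* Editorial note: the __SHRT_WIDTH__ guard matters on targets where short
 * is as wide as int (e.g. 16-bit-int targets); there, __SHRT_MAX__ * 2 + 1
 * would overflow signed int, while the 2U form evaluates in unsigned int
 * and yields 65535 as intended. */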
+85
View File
@@ -0,0 +1,85 @@
//===-- Wrapper for C standard ctype.h declarations on the GPU ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__
#define __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__
#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
#error "This file is for GPU offloading compilation only"
#endif
#include_next <ctype.h>
#if __has_include(<llvm-libc-decls/ctype.h>)
#if defined(__HIP__) || defined(__CUDA__)
#define __LIBC_ATTRS __attribute__((device))
#endif
// The GNU headers like to provide these as macros; we need to undefine them
// so they do not conflict with the following definitions for the GPU.
#pragma push_macro("isalnum")
#pragma push_macro("isalpha")
#pragma push_macro("isblank")
#pragma push_macro("iscntrl")
#pragma push_macro("isdigit")
#pragma push_macro("isgraph")
#pragma push_macro("islower")
#pragma push_macro("isprint")
#pragma push_macro("ispunct")
#pragma push_macro("isspace")
#pragma push_macro("isupper")
#pragma push_macro("isxdigit")
#pragma push_macro("tolower")
#pragma push_macro("toupper")
#undef isalnum
#undef isalpha
#undef iscntrl
#undef isdigit
#undef islower
#undef isgraph
#undef isprint
#undef ispunct
#undef isspace
#undef isupper
#undef isblank
#undef isxdigit
#undef tolower
#undef toupper
#pragma omp begin declare target
#include <llvm-libc-decls/ctype.h>
#pragma omp end declare target
// Restore the original macros when compiling on the host.
#if !defined(__NVPTX__) && !defined(__AMDGPU__)
#pragma pop_macro("isalnum")
#pragma pop_macro("isalpha")
#pragma pop_macro("isblank")
#pragma pop_macro("iscntrl")
#pragma pop_macro("isdigit")
#pragma pop_macro("isgraph")
#pragma pop_macro("islower")
#pragma pop_macro("isprint")
#pragma pop_macro("ispunct")
#pragma pop_macro("isspace")
#pragma pop_macro("isupper")
#pragma pop_macro("isxdigit")
#pragma pop_macro("tolower")
#pragma pop_macro("toupper")
#endif
#undef __LIBC_ATTRS
#endif
#endif // __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__
+34
View File
@@ -0,0 +1,34 @@
//===-- Wrapper for C standard inttypes.h declarations on the GPU ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__
#define __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__
#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
#error "This file is for GPU offloading compilation only"
#endif
#include_next <inttypes.h>
#if __has_include(<llvm-libc-decls/inttypes.h>)
#if defined(__HIP__) || defined(__CUDA__)
#define __LIBC_ATTRS __attribute__((device))
#endif
#pragma omp begin declare target
#include <llvm-libc-decls/inttypes.h>
#pragma omp end declare target
#undef __LIBC_ATTRS
#endif
#endif // __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__
+34
View File
@@ -0,0 +1,34 @@
//===-- Wrapper for C standard stdio.h declarations on the GPU ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__
#define __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__
#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
#error "This file is for GPU offloading compilation only"
#endif
#include_next <stdio.h>
#if __has_include(<llvm-libc-decls/stdio.h>)
#if defined(__HIP__) || defined(__CUDA__)
#define __LIBC_ATTRS __attribute__((device))
#endif
#pragma omp begin declare target
#include <llvm-libc-decls/stdio.h>
#pragma omp end declare target
#undef __LIBC_ATTRS
#endif
#endif // __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__
+42
View File
@@ -0,0 +1,42 @@
//===-- Wrapper for C standard stdlib.h declarations on the GPU -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__
#define __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__
#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
#error "This file is for GPU offloading compilation only"
#endif
#include_next <stdlib.h>
#if __has_include(<llvm-libc-decls/stdlib.h>)
#if defined(__HIP__) || defined(__CUDA__)
#define __LIBC_ATTRS __attribute__((device))
#endif
#pragma omp begin declare target
// The LLVM C library uses this type, so we forward declare it.
typedef void (*__atexithandler_t)(void);
// Enforce ABI compatibility with the structs used by the LLVM C library.
_Static_assert(__builtin_offsetof(div_t, quot) == 0, "ABI mismatch!");
_Static_assert(__builtin_offsetof(ldiv_t, quot) == 0, "ABI mismatch!");
_Static_assert(__builtin_offsetof(lldiv_t, quot) == 0, "ABI mismatch!");
#include <llvm-libc-decls/stdlib.h>
#pragma omp end declare target
#undef __LIBC_ATTRS
#endif
#endif // __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__
+37
View File
@@ -0,0 +1,37 @@
//===-- Wrapper for C standard string.h declarations on the GPU -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__
#define __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__
#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
#error "This file is for GPU offloading compilation only"
#endif
// FIXME: The GNU headers provide C++ standard compliant headers when in C++
// mode and the LLVM libc does not. We cannot enable memchr, strchr, strchrnul,
// strpbrk, strrchr, strstr, or strcasestr until this is addressed.
#include_next <string.h>
#if __has_include(<llvm-libc-decls/string.h>)
#if defined(__HIP__) || defined(__CUDA__)
#define __LIBC_ATTRS __attribute__((device))
#endif
#pragma omp begin declare target
#include <llvm-libc-decls/string.h>
#pragma omp end declare target
#undef __LIBC_ATTRS
#endif
#endif // __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__
+29
View File
@@ -16,12 +16,41 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mwaitx")))
/// Establishes a linear address memory range to be monitored and puts
/// the processor in the monitor event pending state. Data stored in the
/// monitored address range causes the processor to exit the pending state.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c MONITORX instruction.
///
/// \param __p
/// The memory range to be monitored. The size of the range is determined by
/// CPUID function 0000_0005h.
/// \param __extensions
/// Optional extensions for the monitoring state.
/// \param __hints
/// Optional hints for the monitoring state.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_monitorx(void * __p, unsigned __extensions, unsigned __hints)
{
__builtin_ia32_monitorx(__p, __extensions, __hints);
}
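// Hedged usage sketch (editorial, not part of this header): pairing
// MONITORX with MWAITX to idle until a flag is written; build with
// -mmwaitx. Zero extensions/hints/clock requests a default wait with no
// timeout.
static void example_wait_on(volatile int *flag)
{
  while (!*flag) {
    _mm_monitorx((void *)flag, 0, 0);
    if (!*flag)
      _mm_mwaitx(0, 0, 0);
  }
}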
/// Used with the \c MONITORX instruction to wait while the processor is in
/// the monitor event pending state. Data stored in the monitored address
/// range, or an interrupt, causes the processor to exit the pending state.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c MWAITX instruction.
///
/// \param __extensions
/// Optional extensions for the monitoring state, which can vary by
/// processor.
/// \param __hints
/// Optional hints for the monitoring state, which can vary by processor.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock)
{
+3
View File
@@ -474,6 +474,9 @@ typedef enum memory_order
#define CLK_HALF_FLOAT 0x10DD
#define CLK_FLOAT 0x10DE
#define CLK_UNORM_INT24 0x10DF
#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0
#define CLK_UNORM_INT_101010_2 0x10E0
#endif // __OPENCL_C_VERSION__ >= CL_VERSION_3_0
// Channel order, numbering must be aligned with cl_channel_order in cl.h
//
@@ -40,7 +40,6 @@ extern "C" {
// Import types which will be used by __clang_hip_libdevice_declares.h
#ifndef __cplusplus
#include <stdbool.h>
#include <stdint.h>
#endif
+1 -1
View File
@@ -13,7 +13,7 @@
// which do not use nothrow_t are provided without the <new> header.
#include_next <new>
#if defined(__NVPTX__) && defined(_OPENMP)
#if (defined(__NVPTX__) || defined(__AMDGPU__)) && defined(_OPENMP)
#include <cstdlib>
+12 -6
View File
@@ -253,9 +253,12 @@ _mm_movedup_pd(__m128d __a)
/// the processor in the monitor event pending state. Data stored in the
/// monitored address range causes the processor to exit the pending state.
///
/// The \c MONITOR instruction can be used in kernel mode, and in other modes
/// if MSR <c> C001_0015h[MonMwaitUserEn] </c> is set.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> MONITOR </c> instruction.
/// This intrinsic corresponds to the \c MONITOR instruction.
///
/// \param __p
/// The memory range to be monitored. The size of the range is determined by
@@ -270,19 +273,22 @@ _mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
__builtin_ia32_monitor(__p, __extensions, __hints);
}
/// Used with the MONITOR instruction to wait while the processor is in
/// Used with the \c MONITOR instruction to wait while the processor is in
/// the monitor event pending state. Data stored in the monitored address
/// range causes the processor to exit the pending state.
/// range, or an interrupt, causes the processor to exit the pending state.
///
/// The \c MWAIT instruction can be used in kernel mode, and in other modes if
/// MSR <c> C001_0015h[MonMwaitUserEn] </c> is set.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> MWAIT </c> instruction.
/// This intrinsic corresponds to the \c MWAIT instruction.
///
/// \param __extensions
/// Optional extensions for the monitoring state, which may vary by
/// Optional extensions for the monitoring state, which can vary by
/// processor.
/// \param __hints
/// Optional hints for the monitoring state, which may vary by processor.
/// Optional hints for the monitoring state, which can vary by processor.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_mwait(unsigned __extensions, unsigned __hints)
{
+2 -1
View File
@@ -46,6 +46,7 @@
/* SSE2 */
typedef __vector double __v2df;
typedef __vector float __v4f;
typedef __vector long long __v2di;
typedef __vector unsigned long long __v2du;
typedef __vector int __v4si;
@@ -951,7 +952,7 @@ extern __inline __m128d
_mm_cvtpi32_pd(__m64 __A) {
__v4si __temp;
__v2di __tmp2;
__v2df __result;
__v4f __result;
__temp = (__v4si)vec_splats(__A);
__tmp2 = (__v2di)vec_unpackl(__temp);
+2 -2
View File
@@ -305,9 +305,9 @@ extern __inline int
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_epi16(__m128i __A, __m128i __B, const int __imm8) {
__v16qi __charmask = vec_splats((signed char)__imm8);
__v16qu __charmask = vec_splats((unsigned char)__imm8);
__charmask = vec_gb(__charmask);
__v8hu __shortmask = (__v8hu)vec_unpackh(__charmask);
__v8hu __shortmask = (__v8hu)vec_unpackh((__v16qi)__charmask);
#ifdef __BIG_ENDIAN__
__shortmask = vec_reve(__shortmask);
#endif
+65 -2
View File
@@ -7,8 +7,8 @@
*===-----------------------------------------------------------------------===
*/
#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
#error "Never use <rdseedintrin.h> directly; include <x86intrin.h> instead."
#ifndef __IMMINTRIN_H
#error "Never use <rdseedintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __RDSEEDINTRIN_H
@@ -17,12 +17,54 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed")))
/// Stores a hardware-generated 16-bit random value in the memory at \a __p.
///
/// The random number generator complies with NIST SP800-90B and SP800-90C.
///
/// \code{.operation}
/// IF HW_NRND_GEN.ready == 1
/// Store16(__p, HW_NRND_GEN.data)
/// result := 1
/// ELSE
/// Store16(__p, 0)
/// result := 0
/// END
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c RDSEED instruction.
///
/// \param __p
/// Pointer to memory for storing the 16-bit random number.
/// \returns 1 if a random number was generated, 0 if not.
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed16_step(unsigned short *__p)
{
return (int) __builtin_ia32_rdseed16_step(__p);
}
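// Hedged usage sketch (editorial, not part of this header): RDSEED has no
// internal retry queue, so a bounded retry loop with a pause between
// attempts is the usual pattern. Build with -mrdseed.
static int example_rdseed16_retry(unsigned short *out)
{
  for (int i = 0; i < 100; ++i) {
    if (_rdseed16_step(out))
      return 1;
    _mm_pause(); // back off briefly before asking again
  }
  return 0;
}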
/// Stores a hardware-generated 32-bit random value in the memory at \a __p.
///
/// The random number generator complies with NIST SP800-90B and SP800-90C.
///
/// \code{.operation}
/// IF HW_NRND_GEN.ready == 1
/// Store32(__p, HW_NRND_GEN.data)
/// result := 1
/// ELSE
/// Store32(__p, 0)
/// result := 0
/// END
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c RDSEED instruction.
///
/// \param __p
/// Pointer to memory for storing the 32-bit random number.
/// \returns 1 if a random number was generated, 0 if not.
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed32_step(unsigned int *__p)
{
@@ -30,6 +72,27 @@ _rdseed32_step(unsigned int *__p)
}
#ifdef __x86_64__
/// Stores a hardware-generated 64-bit random value in the memory at \a __p.
///
/// The random number generator complies with NIST SP800-90B and SP800-90C.
///
/// \code{.operation}
/// IF HW_NRND_GEN.ready == 1
/// Store64(__p, HW_NRND_GEN.data)
/// result := 1
/// ELSE
/// Store64(__p, 0)
/// result := 0
/// END
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c RDSEED instruction.
///
/// \param __p
/// Pointer to memory for storing the 64-bit random number.
/// \returns 1 if a random number was generated, 0 if not.
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed64_step(unsigned long long *__p)
{
+28
View File
@@ -0,0 +1,28 @@
/*===---- riscv_ntlh.h - RISC-V NTLH intrinsics ----------------------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __RISCV_NTLH_H
#define __RISCV_NTLH_H
#ifndef __riscv_zihintntl
#error "NTLH intrinsics require the NTLH extension."
#endif
enum {
__RISCV_NTLH_INNERMOST_PRIVATE = 2,
__RISCV_NTLH_ALL_PRIVATE,
__RISCV_NTLH_INNERMOST_SHARED,
__RISCV_NTLH_ALL
};
#define __riscv_ntl_load(PTR, DOMAIN) __builtin_riscv_ntl_load((PTR), (DOMAIN))
#define __riscv_ntl_store(PTR, VAL, DOMAIN) \
__builtin_riscv_ntl_store((PTR), (VAL), (DOMAIN))
#endif
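/* Hedged sketch (editorial, not part of this header): a streaming copy
 * that tags both sides with non-temporal locality hints, assuming a
 * toolchain with the zihintntl extension enabled. */
static void example_stream_copy(int *__dst, const int *__src, __SIZE_TYPE__ __n)
{
  for (__SIZE_TYPE__ __i = 0; __i < __n; ++__i) {
    int __v = __riscv_ntl_load(&__src[__i], __RISCV_NTLH_ALL);
    __riscv_ntl_store(&__dst[__i], __v, __RISCV_NTLH_ALL);
  }
}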
+240 -51
View File
@@ -25,52 +25,15 @@ extern "C" {
#pragma clang riscv intrinsic vector
#define __riscv_vlenb() __builtin_rvv_vlenb()
enum RVV_CSR {
RVV_VSTART = 0,
RVV_VXSAT,
RVV_VXRM,
RVV_VCSR,
enum __RISCV_FRM {
__RISCV_FRM_RNE = 0,
__RISCV_FRM_RTZ = 1,
__RISCV_FRM_RDN = 2,
__RISCV_FRM_RUP = 3,
__RISCV_FRM_RMM = 4,
};
static __inline__ __attribute__((__always_inline__, __nodebug__))
unsigned long __riscv_vread_csr(enum RVV_CSR __csr) {
unsigned long __rv = 0;
switch (__csr) {
case RVV_VSTART:
__asm__ __volatile__ ("csrr\t%0, vstart" : "=r"(__rv) : : "memory");
break;
case RVV_VXSAT:
__asm__ __volatile__ ("csrr\t%0, vxsat" : "=r"(__rv) : : "memory");
break;
case RVV_VXRM:
__asm__ __volatile__ ("csrr\t%0, vxrm" : "=r"(__rv) : : "memory");
break;
case RVV_VCSR:
__asm__ __volatile__ ("csrr\t%0, vcsr" : "=r"(__rv) : : "memory");
break;
}
return __rv;
}
static __inline__ __attribute__((__always_inline__, __nodebug__))
void __riscv_vwrite_csr(enum RVV_CSR __csr, unsigned long __value) {
switch (__csr) {
case RVV_VSTART:
__asm__ __volatile__ ("csrw\tvstart, %z0" : : "rJ"(__value) : "memory");
break;
case RVV_VXSAT:
__asm__ __volatile__ ("csrw\tvxsat, %z0" : : "rJ"(__value) : "memory");
break;
case RVV_VXRM:
__asm__ __volatile__ ("csrw\tvxrm, %z0" : : "rJ"(__value) : "memory");
break;
case RVV_VCSR:
__asm__ __volatile__ ("csrw\tvcsr, %z0" : : "rJ"(__value) : "memory");
break;
}
}
#define __riscv_vlenb() __builtin_rvv_vlenb()
#define __riscv_vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
#define __riscv_vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
@@ -130,6 +93,13 @@ void __riscv_vwrite_csr(enum RVV_CSR __csr, unsigned long __value) {
#define __riscv_vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
#endif
enum __RISCV_VXRM {
__RISCV_VXRM_RNU = 0,
__RISCV_VXRM_RNE = 1,
__RISCV_VXRM_RDN = 2,
__RISCV_VXRM_ROD = 3,
};
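/* Hedged sketch (editorial, not part of this header): the strip-mined loop
 * shape the vsetvl macros above support. Intrinsic names follow the v0.12
 * RVV spec this header targets; vint32m8_t is declared later in this file,
 * and the V extension is assumed at compile time. */
static void example_add_i32(__SIZE_TYPE__ n, const int *x, int *y)
{
  for (__SIZE_TYPE__ vl; n > 0; n -= vl, x += vl, y += vl) {
    vl = __riscv_vsetvl_e32m8(n);           /* elements processed this pass */
    vint32m8_t vx = __riscv_vle32_v_i32m8(x, vl);
    vint32m8_t vy = __riscv_vle32_v_i32m8(y, vl);
    __riscv_vse32_v_i32m8(y, __riscv_vadd_vv_i32m8(vx, vy, vl), vl);
  }
}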
typedef __rvv_bool64_t vbool64_t;
typedef __rvv_bool32_t vbool32_t;
typedef __rvv_bool16_t vbool16_t;
@@ -139,70 +109,289 @@ typedef __rvv_bool2_t vbool2_t;
typedef __rvv_bool1_t vbool1_t;
typedef __rvv_int8mf8_t vint8mf8_t;
typedef __rvv_uint8mf8_t vuint8mf8_t;
typedef __rvv_int8mf8x2_t vint8mf8x2_t;
typedef __rvv_uint8mf8x2_t vuint8mf8x2_t;
typedef __rvv_int8mf8x3_t vint8mf8x3_t;
typedef __rvv_uint8mf8x3_t vuint8mf8x3_t;
typedef __rvv_int8mf8x4_t vint8mf8x4_t;
typedef __rvv_uint8mf8x4_t vuint8mf8x4_t;
typedef __rvv_int8mf8x5_t vint8mf8x5_t;
typedef __rvv_uint8mf8x5_t vuint8mf8x5_t;
typedef __rvv_int8mf8x6_t vint8mf8x6_t;
typedef __rvv_uint8mf8x6_t vuint8mf8x6_t;
typedef __rvv_int8mf8x7_t vint8mf8x7_t;
typedef __rvv_uint8mf8x7_t vuint8mf8x7_t;
typedef __rvv_int8mf8x8_t vint8mf8x8_t;
typedef __rvv_uint8mf8x8_t vuint8mf8x8_t;
typedef __rvv_int8mf4_t vint8mf4_t;
typedef __rvv_uint8mf4_t vuint8mf4_t;
typedef __rvv_int8mf4x2_t vint8mf4x2_t;
typedef __rvv_uint8mf4x2_t vuint8mf4x2_t;
typedef __rvv_int8mf4x3_t vint8mf4x3_t;
typedef __rvv_uint8mf4x3_t vuint8mf4x3_t;
typedef __rvv_int8mf4x4_t vint8mf4x4_t;
typedef __rvv_uint8mf4x4_t vuint8mf4x4_t;
typedef __rvv_int8mf4x5_t vint8mf4x5_t;
typedef __rvv_uint8mf4x5_t vuint8mf4x5_t;
typedef __rvv_int8mf4x6_t vint8mf4x6_t;
typedef __rvv_uint8mf4x6_t vuint8mf4x6_t;
typedef __rvv_int8mf4x7_t vint8mf4x7_t;
typedef __rvv_uint8mf4x7_t vuint8mf4x7_t;
typedef __rvv_int8mf4x8_t vint8mf4x8_t;
typedef __rvv_uint8mf4x8_t vuint8mf4x8_t;
typedef __rvv_int8mf2_t vint8mf2_t;
typedef __rvv_uint8mf2_t vuint8mf2_t;
typedef __rvv_int8mf2x2_t vint8mf2x2_t;
typedef __rvv_uint8mf2x2_t vuint8mf2x2_t;
typedef __rvv_int8mf2x3_t vint8mf2x3_t;
typedef __rvv_uint8mf2x3_t vuint8mf2x3_t;
typedef __rvv_int8mf2x4_t vint8mf2x4_t;
typedef __rvv_uint8mf2x4_t vuint8mf2x4_t;
typedef __rvv_int8mf2x5_t vint8mf2x5_t;
typedef __rvv_uint8mf2x5_t vuint8mf2x5_t;
typedef __rvv_int8mf2x6_t vint8mf2x6_t;
typedef __rvv_uint8mf2x6_t vuint8mf2x6_t;
typedef __rvv_int8mf2x7_t vint8mf2x7_t;
typedef __rvv_uint8mf2x7_t vuint8mf2x7_t;
typedef __rvv_int8mf2x8_t vint8mf2x8_t;
typedef __rvv_uint8mf2x8_t vuint8mf2x8_t;
typedef __rvv_int8m1_t vint8m1_t;
typedef __rvv_uint8m1_t vuint8m1_t;
typedef __rvv_int8m1x2_t vint8m1x2_t;
typedef __rvv_uint8m1x2_t vuint8m1x2_t;
typedef __rvv_int8m1x3_t vint8m1x3_t;
typedef __rvv_uint8m1x3_t vuint8m1x3_t;
typedef __rvv_int8m1x4_t vint8m1x4_t;
typedef __rvv_uint8m1x4_t vuint8m1x4_t;
typedef __rvv_int8m1x5_t vint8m1x5_t;
typedef __rvv_uint8m1x5_t vuint8m1x5_t;
typedef __rvv_int8m1x6_t vint8m1x6_t;
typedef __rvv_uint8m1x6_t vuint8m1x6_t;
typedef __rvv_int8m1x7_t vint8m1x7_t;
typedef __rvv_uint8m1x7_t vuint8m1x7_t;
typedef __rvv_int8m1x8_t vint8m1x8_t;
typedef __rvv_uint8m1x8_t vuint8m1x8_t;
typedef __rvv_int8m2_t vint8m2_t;
typedef __rvv_uint8m2_t vuint8m2_t;
typedef __rvv_int8m2x2_t vint8m2x2_t;
typedef __rvv_uint8m2x2_t vuint8m2x2_t;
typedef __rvv_int8m2x3_t vint8m2x3_t;
typedef __rvv_uint8m2x3_t vuint8m2x3_t;
typedef __rvv_int8m2x4_t vint8m2x4_t;
typedef __rvv_uint8m2x4_t vuint8m2x4_t;
typedef __rvv_int8m4_t vint8m4_t;
typedef __rvv_uint8m4_t vuint8m4_t;
typedef __rvv_int8m4x2_t vint8m4x2_t;
typedef __rvv_uint8m4x2_t vuint8m4x2_t;
typedef __rvv_int8m8_t vint8m8_t;
typedef __rvv_uint8m8_t vuint8m8_t;
typedef __rvv_int16mf4_t vint16mf4_t;
typedef __rvv_uint16mf4_t vuint16mf4_t;
typedef __rvv_int16mf4x2_t vint16mf4x2_t;
typedef __rvv_uint16mf4x2_t vuint16mf4x2_t;
typedef __rvv_int16mf4x3_t vint16mf4x3_t;
typedef __rvv_uint16mf4x3_t vuint16mf4x3_t;
typedef __rvv_int16mf4x4_t vint16mf4x4_t;
typedef __rvv_uint16mf4x4_t vuint16mf4x4_t;
typedef __rvv_int16mf4x5_t vint16mf4x5_t;
typedef __rvv_uint16mf4x5_t vuint16mf4x5_t;
typedef __rvv_int16mf4x6_t vint16mf4x6_t;
typedef __rvv_uint16mf4x6_t vuint16mf4x6_t;
typedef __rvv_int16mf4x7_t vint16mf4x7_t;
typedef __rvv_uint16mf4x7_t vuint16mf4x7_t;
typedef __rvv_int16mf4x8_t vint16mf4x8_t;
typedef __rvv_uint16mf4x8_t vuint16mf4x8_t;
typedef __rvv_int16mf2_t vint16mf2_t;
typedef __rvv_uint16mf2_t vuint16mf2_t;
typedef __rvv_int16mf2x2_t vint16mf2x2_t;
typedef __rvv_uint16mf2x2_t vuint16mf2x2_t;
typedef __rvv_int16mf2x3_t vint16mf2x3_t;
typedef __rvv_uint16mf2x3_t vuint16mf2x3_t;
typedef __rvv_int16mf2x4_t vint16mf2x4_t;
typedef __rvv_uint16mf2x4_t vuint16mf2x4_t;
typedef __rvv_int16mf2x5_t vint16mf2x5_t;
typedef __rvv_uint16mf2x5_t vuint16mf2x5_t;
typedef __rvv_int16mf2x6_t vint16mf2x6_t;
typedef __rvv_uint16mf2x6_t vuint16mf2x6_t;
typedef __rvv_int16mf2x7_t vint16mf2x7_t;
typedef __rvv_uint16mf2x7_t vuint16mf2x7_t;
typedef __rvv_int16mf2x8_t vint16mf2x8_t;
typedef __rvv_uint16mf2x8_t vuint16mf2x8_t;
typedef __rvv_int16m1_t vint16m1_t;
typedef __rvv_uint16m1_t vuint16m1_t;
typedef __rvv_int16m1x2_t vint16m1x2_t;
typedef __rvv_uint16m1x2_t vuint16m1x2_t;
typedef __rvv_int16m1x3_t vint16m1x3_t;
typedef __rvv_uint16m1x3_t vuint16m1x3_t;
typedef __rvv_int16m1x4_t vint16m1x4_t;
typedef __rvv_uint16m1x4_t vuint16m1x4_t;
typedef __rvv_int16m1x5_t vint16m1x5_t;
typedef __rvv_uint16m1x5_t vuint16m1x5_t;
typedef __rvv_int16m1x6_t vint16m1x6_t;
typedef __rvv_uint16m1x6_t vuint16m1x6_t;
typedef __rvv_int16m1x7_t vint16m1x7_t;
typedef __rvv_uint16m1x7_t vuint16m1x7_t;
typedef __rvv_int16m1x8_t vint16m1x8_t;
typedef __rvv_uint16m1x8_t vuint16m1x8_t;
typedef __rvv_int16m2_t vint16m2_t;
typedef __rvv_uint16m2_t vuint16m2_t;
typedef __rvv_int16m2x2_t vint16m2x2_t;
typedef __rvv_uint16m2x2_t vuint16m2x2_t;
typedef __rvv_int16m2x3_t vint16m2x3_t;
typedef __rvv_uint16m2x3_t vuint16m2x3_t;
typedef __rvv_int16m2x4_t vint16m2x4_t;
typedef __rvv_uint16m2x4_t vuint16m2x4_t;
typedef __rvv_int16m4_t vint16m4_t;
typedef __rvv_uint16m4_t vuint16m4_t;
typedef __rvv_int16m4x2_t vint16m4x2_t;
typedef __rvv_uint16m4x2_t vuint16m4x2_t;
typedef __rvv_int16m8_t vint16m8_t;
typedef __rvv_uint16m8_t vuint16m8_t;
typedef __rvv_int32mf2_t vint32mf2_t;
typedef __rvv_uint32mf2_t vuint32mf2_t;
typedef __rvv_int32mf2x2_t vint32mf2x2_t;
typedef __rvv_uint32mf2x2_t vuint32mf2x2_t;
typedef __rvv_int32mf2x3_t vint32mf2x3_t;
typedef __rvv_uint32mf2x3_t vuint32mf2x3_t;
typedef __rvv_int32mf2x4_t vint32mf2x4_t;
typedef __rvv_uint32mf2x4_t vuint32mf2x4_t;
typedef __rvv_int32mf2x5_t vint32mf2x5_t;
typedef __rvv_uint32mf2x5_t vuint32mf2x5_t;
typedef __rvv_int32mf2x6_t vint32mf2x6_t;
typedef __rvv_uint32mf2x6_t vuint32mf2x6_t;
typedef __rvv_int32mf2x7_t vint32mf2x7_t;
typedef __rvv_uint32mf2x7_t vuint32mf2x7_t;
typedef __rvv_int32mf2x8_t vint32mf2x8_t;
typedef __rvv_uint32mf2x8_t vuint32mf2x8_t;
typedef __rvv_int32m1_t vint32m1_t;
typedef __rvv_uint32m1_t vuint32m1_t;
typedef __rvv_int32m1x2_t vint32m1x2_t;
typedef __rvv_uint32m1x2_t vuint32m1x2_t;
typedef __rvv_int32m1x3_t vint32m1x3_t;
typedef __rvv_uint32m1x3_t vuint32m1x3_t;
typedef __rvv_int32m1x4_t vint32m1x4_t;
typedef __rvv_uint32m1x4_t vuint32m1x4_t;
typedef __rvv_int32m1x5_t vint32m1x5_t;
typedef __rvv_uint32m1x5_t vuint32m1x5_t;
typedef __rvv_int32m1x6_t vint32m1x6_t;
typedef __rvv_uint32m1x6_t vuint32m1x6_t;
typedef __rvv_int32m1x7_t vint32m1x7_t;
typedef __rvv_uint32m1x7_t vuint32m1x7_t;
typedef __rvv_int32m1x8_t vint32m1x8_t;
typedef __rvv_uint32m1x8_t vuint32m1x8_t;
typedef __rvv_int32m2_t vint32m2_t;
typedef __rvv_uint32m2_t vuint32m2_t;
typedef __rvv_int32m2x2_t vint32m2x2_t;
typedef __rvv_uint32m2x2_t vuint32m2x2_t;
typedef __rvv_int32m2x3_t vint32m2x3_t;
typedef __rvv_uint32m2x3_t vuint32m2x3_t;
typedef __rvv_int32m2x4_t vint32m2x4_t;
typedef __rvv_uint32m2x4_t vuint32m2x4_t;
typedef __rvv_int32m4_t vint32m4_t;
typedef __rvv_uint32m4_t vuint32m4_t;
typedef __rvv_int32m4x2_t vint32m4x2_t;
typedef __rvv_uint32m4x2_t vuint32m4x2_t;
typedef __rvv_int32m8_t vint32m8_t;
typedef __rvv_uint32m8_t vuint32m8_t;
typedef __rvv_int64m1_t vint64m1_t;
typedef __rvv_uint64m1_t vuint64m1_t;
typedef __rvv_int64m1x2_t vint64m1x2_t;
typedef __rvv_uint64m1x2_t vuint64m1x2_t;
typedef __rvv_int64m1x3_t vint64m1x3_t;
typedef __rvv_uint64m1x3_t vuint64m1x3_t;
typedef __rvv_int64m1x4_t vint64m1x4_t;
typedef __rvv_uint64m1x4_t vuint64m1x4_t;
typedef __rvv_int64m1x5_t vint64m1x5_t;
typedef __rvv_uint64m1x5_t vuint64m1x5_t;
typedef __rvv_int64m1x6_t vint64m1x6_t;
typedef __rvv_uint64m1x6_t vuint64m1x6_t;
typedef __rvv_int64m1x7_t vint64m1x7_t;
typedef __rvv_uint64m1x7_t vuint64m1x7_t;
typedef __rvv_int64m1x8_t vint64m1x8_t;
typedef __rvv_uint64m1x8_t vuint64m1x8_t;
typedef __rvv_int64m2_t vint64m2_t;
typedef __rvv_uint64m2_t vuint64m2_t;
typedef __rvv_int64m2x2_t vint64m2x2_t;
typedef __rvv_uint64m2x2_t vuint64m2x2_t;
typedef __rvv_int64m2x3_t vint64m2x3_t;
typedef __rvv_uint64m2x3_t vuint64m2x3_t;
typedef __rvv_int64m2x4_t vint64m2x4_t;
typedef __rvv_uint64m2x4_t vuint64m2x4_t;
typedef __rvv_int64m4_t vint64m4_t;
typedef __rvv_uint64m4_t vuint64m4_t;
typedef __rvv_int64m4x2_t vint64m4x2_t;
typedef __rvv_uint64m4x2_t vuint64m4x2_t;
typedef __rvv_int64m8_t vint64m8_t;
typedef __rvv_uint64m8_t vuint64m8_t;
#if defined(__riscv_zvfh)
typedef __rvv_float16mf4_t vfloat16mf4_t;
typedef __rvv_float16mf4x2_t vfloat16mf4x2_t;
typedef __rvv_float16mf4x3_t vfloat16mf4x3_t;
typedef __rvv_float16mf4x4_t vfloat16mf4x4_t;
typedef __rvv_float16mf4x5_t vfloat16mf4x5_t;
typedef __rvv_float16mf4x6_t vfloat16mf4x6_t;
typedef __rvv_float16mf4x7_t vfloat16mf4x7_t;
typedef __rvv_float16mf4x8_t vfloat16mf4x8_t;
typedef __rvv_float16mf2_t vfloat16mf2_t;
typedef __rvv_float16mf2x2_t vfloat16mf2x2_t;
typedef __rvv_float16mf2x3_t vfloat16mf2x3_t;
typedef __rvv_float16mf2x4_t vfloat16mf2x4_t;
typedef __rvv_float16mf2x5_t vfloat16mf2x5_t;
typedef __rvv_float16mf2x6_t vfloat16mf2x6_t;
typedef __rvv_float16mf2x7_t vfloat16mf2x7_t;
typedef __rvv_float16mf2x8_t vfloat16mf2x8_t;
typedef __rvv_float16m1_t vfloat16m1_t;
typedef __rvv_float16m1x2_t vfloat16m1x2_t;
typedef __rvv_float16m1x3_t vfloat16m1x3_t;
typedef __rvv_float16m1x4_t vfloat16m1x4_t;
typedef __rvv_float16m1x5_t vfloat16m1x5_t;
typedef __rvv_float16m1x6_t vfloat16m1x6_t;
typedef __rvv_float16m1x7_t vfloat16m1x7_t;
typedef __rvv_float16m1x8_t vfloat16m1x8_t;
typedef __rvv_float16m2_t vfloat16m2_t;
typedef __rvv_float16m2x2_t vfloat16m2x2_t;
typedef __rvv_float16m2x3_t vfloat16m2x3_t;
typedef __rvv_float16m2x4_t vfloat16m2x4_t;
typedef __rvv_float16m4_t vfloat16m4_t;
typedef __rvv_float16m4x2_t vfloat16m4x2_t;
typedef __rvv_float16m8_t vfloat16m8_t;
#endif
#if (__riscv_v_elen_fp >= 32)
typedef __rvv_float32mf2_t vfloat32mf2_t;
typedef __rvv_float32mf2x2_t vfloat32mf2x2_t;
typedef __rvv_float32mf2x3_t vfloat32mf2x3_t;
typedef __rvv_float32mf2x4_t vfloat32mf2x4_t;
typedef __rvv_float32mf2x5_t vfloat32mf2x5_t;
typedef __rvv_float32mf2x6_t vfloat32mf2x6_t;
typedef __rvv_float32mf2x7_t vfloat32mf2x7_t;
typedef __rvv_float32mf2x8_t vfloat32mf2x8_t;
typedef __rvv_float32m1_t vfloat32m1_t;
typedef __rvv_float32m1x2_t vfloat32m1x2_t;
typedef __rvv_float32m1x3_t vfloat32m1x3_t;
typedef __rvv_float32m1x4_t vfloat32m1x4_t;
typedef __rvv_float32m1x5_t vfloat32m1x5_t;
typedef __rvv_float32m1x6_t vfloat32m1x6_t;
typedef __rvv_float32m1x7_t vfloat32m1x7_t;
typedef __rvv_float32m1x8_t vfloat32m1x8_t;
typedef __rvv_float32m2_t vfloat32m2_t;
typedef __rvv_float32m2x2_t vfloat32m2x2_t;
typedef __rvv_float32m2x3_t vfloat32m2x3_t;
typedef __rvv_float32m2x4_t vfloat32m2x4_t;
typedef __rvv_float32m4_t vfloat32m4_t;
typedef __rvv_float32m4x2_t vfloat32m4x2_t;
typedef __rvv_float32m8_t vfloat32m8_t;
#endif
#if (__riscv_v_elen_fp >= 64)
typedef __rvv_float64m1_t vfloat64m1_t;
typedef __rvv_float64m1x2_t vfloat64m1x2_t;
typedef __rvv_float64m1x3_t vfloat64m1x3_t;
typedef __rvv_float64m1x4_t vfloat64m1x4_t;
typedef __rvv_float64m1x5_t vfloat64m1x5_t;
typedef __rvv_float64m1x6_t vfloat64m1x6_t;
typedef __rvv_float64m1x7_t vfloat64m1x7_t;
typedef __rvv_float64m1x8_t vfloat64m1x8_t;
typedef __rvv_float64m2_t vfloat64m2_t;
typedef __rvv_float64m2x2_t vfloat64m2x2_t;
typedef __rvv_float64m2x3_t vfloat64m2x3_t;
typedef __rvv_float64m2x4_t vfloat64m2x4_t;
typedef __rvv_float64m4_t vfloat64m4_t;
typedef __rvv_float64m4x2_t vfloat64m4x2_t;
typedef __rvv_float64m8_t vfloat64m8_t;
#endif
#define __riscv_v_intrinsic_overloading 1
#ifdef __cplusplus
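The typedef block above only maps the public v* names onto the compiler's internal __rvv_* types; computation goes through the __riscv_* intrinsics declared by the same header. As a hedged sketch (assuming an RVV 1.0 target such as -march=rv64gcv and the v0.12 intrinsic naming shipped with LLVM 17; the helper name is illustrative), a strip-mined loop over vint32m1_t looks like this:
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>
void vec_add_i32(int32_t *dst, const int32_t *a, const int32_t *b, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);          /* elements this pass */
    vint32m1_t va = __riscv_vle32_v_i32m1(a + i, vl); /* unit-stride loads */
    vint32m1_t vb = __riscv_vle32_v_i32m1(b + i, vl);
    __riscv_vse32_v_i32m1(dst + i, __riscv_vadd_vv_i32m1(va, vb, vl), vl);
    i += vl;
  }
}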
+200
@@ -0,0 +1,200 @@
/*===--------------- sha512intrin.h - SHA512 intrinsics -----------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __IMMINTRIN_H
#error "Never use <sha512intrin.h> directly; include <immintrin.h> instead."
#endif // __IMMINTRIN_H
#ifndef __SHA512INTRIN_H
#define __SHA512INTRIN_H
#define __DEFAULT_FN_ATTRS256 \
__attribute__((__always_inline__, __nodebug__, __target__("sha512"), \
__min_vector_width__(256)))
/// This intrinsic is one of the two SHA512 message scheduling instructions.
/// The intrinsic performs an intermediate calculation for the next four
/// SHA512 message qwords. The calculated results are stored in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_sha512msg1_epi64(__m256i __A, __m128i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VSHA512MSG1 instruction.
///
/// \param __A
/// A 256-bit vector of [4 x long long].
/// \param __B
/// A 128-bit vector of [2 x long long].
/// \returns
/// A 256-bit vector of [4 x long long].
///
/// \code{.operation}
/// DEFINE ROR64(qword, n) {
/// count := n % 64
/// dest := (qword >> count) | (qword << (64 - count))
/// RETURN dest
/// }
/// DEFINE SHR64(qword, n) {
/// RETURN qword >> n
/// }
/// DEFINE s0(qword) {
/// RETURN ROR64(qword,1) ^ ROR64(qword, 8) ^ SHR64(qword, 7)
/// }
/// W[4] := __B.qword[0]
/// W[3] := __A.qword[3]
/// W[2] := __A.qword[2]
/// W[1] := __A.qword[1]
/// W[0] := __A.qword[0]
/// dst.qword[3] := W[3] + s0(W[4])
/// dst.qword[2] := W[2] + s0(W[3])
/// dst.qword[1] := W[1] + s0(W[2])
/// dst.qword[0] := W[0] + s0(W[1])
/// dst[MAX:256] := 0
/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sha512msg1_epi64(__m256i __A, __m128i __B) {
return (__m256i)__builtin_ia32_vsha512msg1((__v4du)__A, (__v2du)__B);
}
/// This intrinsic is one of the two SHA512 message scheduling instructions.
/// The intrinsic performs the final calculation for the next four SHA512
/// message qwords. The calculated results are stored in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_sha512msg2_epi64(__m256i __A, __m256i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VSHA512MSG2 instruction.
///
/// \param __A
/// A 256-bit vector of [4 x long long].
/// \param __B
/// A 256-bit vector of [4 x long long].
/// \returns
/// A 256-bit vector of [4 x long long].
///
/// \code{.operation}
/// DEFINE ROR64(qword, n) {
/// count := n % 64
/// dest := (qword >> count) | (qword << (64 - count))
/// RETURN dest
/// }
/// DEFINE SHR64(qword, n) {
/// RETURN qword >> n
/// }
/// DEFINE s1(qword) {
/// RETURN ROR64(qword,19) ^ ROR64(qword, 61) ^ SHR64(qword, 6)
/// }
/// W[14] := __B.qword[2]
/// W[15] := __B.qword[3]
/// W[16] := __A.qword[0] + s1(W[14])
/// W[17] := __A.qword[1] + s1(W[15])
/// W[18] := __A.qword[2] + s1(W[16])
/// W[19] := __A.qword[3] + s1(W[17])
/// dst.qword[3] := W[19]
/// dst.qword[2] := W[18]
/// dst.qword[1] := W[17]
/// dst.qword[0] := W[16]
/// dst[MAX:256] := 0
/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sha512msg2_epi64(__m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_vsha512msg2((__v4du)__A, (__v4du)__B);
}
/// This intrinsic performs two rounds of SHA512 operation using an initial
/// SHA512 state (C,D,G,H) from \a __A, an initial SHA512 state (A,B,E,F) from
/// \a __B, and a pre-computed sum of the next two round message qwords and
/// the corresponding round constants from \a __C (only the two lower qwords
/// of the third operand). The updated SHA512 state (A,B,E,F) is returned in
/// \a dst, and \a dst can be used as the updated state (C,D,G,H) in later
/// rounds.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C)
/// \endcode
///
/// This intrinsic corresponds to the \c VSHA512RNDS2 instruction.
///
/// \param __A
/// A 256-bit vector of [4 x long long].
/// \param __B
/// A 256-bit vector of [4 x long long].
/// \param __C
/// A 128-bit vector of [2 x long long].
/// \returns
/// A 256-bit vector of [4 x long long].
///
/// \code{.operation}
/// DEFINE ROR64(qword, n) {
/// count := n % 64
/// dest := (qword >> count) | (qword << (64 - count))
/// RETURN dest
/// }
/// DEFINE SHR64(qword, n) {
/// RETURN qword >> n
/// }
/// DEFINE cap_sigma0(qword) {
/// RETURN ROR64(qword,28) ^ ROR64(qword, 34) ^ ROR64(qword, 39)
/// }
/// DEFINE cap_sigma1(qword) {
/// RETURN ROR64(qword,14) ^ ROR64(qword, 18) ^ ROR64(qword, 41)
/// }
/// DEFINE MAJ(a,b,c) {
/// RETURN (a & b) ^ (a & c) ^ (b & c)
/// }
/// DEFINE CH(e,f,g) {
/// RETURN (e & f) ^ (g & ~e)
/// }
/// A[0] := __B.qword[3]
/// B[0] := __B.qword[2]
/// C[0] := __A.qword[3]
/// D[0] := __A.qword[2]
/// E[0] := __B.qword[1]
/// F[0] := __B.qword[0]
/// G[0] := __A.qword[1]
/// H[0] := __A.qword[0]
/// WK[0]:= __C.qword[0]
/// WK[1]:= __C.qword[1]
/// FOR i := 0 to 1:
/// A[i+1] := CH(E[i], F[i], G[i]) +
/// cap_sigma1(E[i]) + WK[i] + H[i] +
/// MAJ(A[i], B[i], C[i]) +
/// cap_sigma0(A[i])
/// B[i+1] := A[i]
/// C[i+1] := B[i]
/// D[i+1] := C[i]
/// E[i+1] := CH(E[i], F[i], G[i]) +
/// cap_sigma1(E[i]) + WK[i] + H[i] + D[i]
/// F[i+1] := E[i]
/// G[i+1] := F[i]
/// H[i+1] := G[i]
/// ENDFOR
/// dst.qword[3] := A[2]
/// dst.qword[2] := B[2]
/// dst.qword[1] := E[2]
/// dst.qword[0] := F[2]
/// dst[MAX:256] := 0
/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C) {
return (__m256i)__builtin_ia32_vsha512rnds2((__v4du)__A, (__v4du)__B,
(__v2du)__C);
}
#undef __DEFAULT_FN_ATTRS256
#endif // __SHA512INTRIN_H
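A hedged sketch of how the three intrinsics above chain: msg1 contributes the s0() terms, a plain 64-bit add supplies the W[t-7] terms the instructions do not cover, msg2 finishes the schedule, and rnds2 consumes qwords pre-summed with their round constants. Helper and variable names are illustrative, this is the calling pattern only and not a verified SHA-512 implementation; assumes compilation with -mavx2 -msha512.
#include <immintrin.h>
/* One schedule update: produces W[t..t+3] from earlier message qwords. */
static __m256i sha512_next_w(__m256i w0_3, __m128i w4, __m256i w7_terms,
                             __m256i w12_15) {
  __m256i t = _mm256_sha512msg1_epi64(w0_3, w4);  /* adds the s0() terms   */
  t = _mm256_add_epi64(t, w7_terms);              /* adds the W[t-7] terms */
  return _mm256_sha512msg2_epi64(t, w12_15);      /* adds the s1() terms   */
}
/* Two compression rounds; wk holds two message qwords pre-summed with their
 * round constants, as the description above requires. */
static __m256i sha512_two_rounds(__m256i cdgh, __m256i abef, __m128i wk) {
  return _mm256_sha512rnds2_epi64(cdgh, abef, wk);
}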
+128
@@ -17,39 +17,167 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha"), __min_vector_width__(128)))
/// Performs four iterations of the inner loop of the SHA-1 message digest
/// algorithm using the starting SHA-1 state (A, B, C, D) from the 128-bit
/// vector of [4 x i32] in \a V1 and the next four 32-bit elements of the
/// message from the 128-bit vector of [4 x i32] in \a V2. Note that the
/// SHA-1 state variable E must have already been added to \a V2
/// (\c _mm_sha1nexte_epu32() can perform this step). Returns the updated
/// SHA-1 state (A, B, C, D) as a 128-bit vector of [4 x i32].
///
/// The SHA-1 algorithm has an inner loop of 80 iterations, twenty each
/// with a different combining function and rounding constant. This
/// intrinsic performs four iterations using a combining function and
/// rounding constant selected by \a M[1:0].
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_sha1rnds4_epu32(__m128i V1, __m128i V2, const int M);
/// \endcode
///
/// This intrinsic corresponds to the \c SHA1RNDS4 instruction.
///
/// \param V1
/// A 128-bit vector of [4 x i32] containing the initial SHA-1 state.
/// \param V2
/// A 128-bit vector of [4 x i32] containing the next four elements of
/// the message, plus SHA-1 state variable E.
/// \param M
/// An immediate value where bits [1:0] select among four possible
/// combining functions and rounding constants (not specified here).
/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state.
#define _mm_sha1rnds4_epu32(V1, V2, M) \
__builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M))
/// Calculates the SHA-1 state variable E from the SHA-1 state variables in
/// the 128-bit vector of [4 x i32] in \a __X, adds that to the next set of
/// four message elements in the 128-bit vector of [4 x i32] in \a __Y, and
/// returns the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c SHA1NEXTE instruction.
///
/// \param __X
/// A 128-bit vector of [4 x i32] containing the current SHA-1 state.
/// \param __Y
/// A 128-bit vector of [4 x i32] containing the next four elements of the
/// message.
/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1
/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y);
}
/// Performs an intermediate calculation for deriving the next four SHA-1
/// message elements using previous message elements from the 128-bit
/// vectors of [4 x i32] in \a __X and \a __Y, and returns the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c SHA1MSG1 instruction.
///
/// \param __X
/// A 128-bit vector of [4 x i32] containing previous message elements.
/// \param __Y
/// A 128-bit vector of [4 x i32] containing previous message elements.
/// \returns A 128-bit vector of [4 x i32] containing the derived SHA-1
/// elements.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y);
}
/// Performs the final calculation for deriving the next four SHA-1 message
/// elements using previous message elements from the 128-bit vectors of
/// [4 x i32] in \a __X and \a __Y, and returns the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c SHA1MSG2 instruction.
///
/// \param __X
/// A 128-bit vector of [4 x i32] containing an intermediate result.
/// \param __Y
/// A 128-bit vector of [4 x i32] containing previous message values.
/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1
/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y);
}
/// Performs two rounds of SHA-256 operation using the following inputs: a
/// starting SHA-256 state (C, D, G, H) from the 128-bit vector of
/// [4 x i32] in \a __X; a starting SHA-256 state (A, B, E, F) from the
/// 128-bit vector of [4 x i32] in \a __Y; and a pre-computed sum of the
/// next two message elements (unsigned 32-bit integers) and corresponding
/// rounding constants from the 128-bit vector of [4 x i32] in \a __Z.
/// Returns the updated SHA-256 state (A, B, E, F) as a 128-bit vector of
/// [4 x i32].
///
/// The SHA-256 algorithm has a core loop of 64 iterations. This intrinsic
/// performs two of those iterations.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c SHA256RNDS2 instruction.
///
/// \param __X
/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256
/// state.
/// \param __Y
/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256
/// state.
/// \param __Z
/// A 128-bit vector of [4 x i32] containing additional input to the
/// SHA-256 operation.
/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-256 state.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)
{
return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z);
}
/// Performs an intermediate calculation for deriving the next four SHA-256
/// message elements using previous message elements from the 128-bit
/// vectors of [4 x i32] in \a __X and \a __Y, and returns the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c SHA256MSG1 instruction.
///
/// \param __X
/// A 128-bit vector of [4 x i32] containing previous message elements.
/// \param __Y
/// A 128-bit vector of [4 x i32] containing previous message elements.
/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-256
/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y);
}
/// Performs the final calculation for deriving the next four SHA-256 message
/// elements using previous message elements from the 128-bit vectors of
/// [4 x i32] in \a __X and \a __Y, and returns the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c SHA256MSG2 instruction.
///
/// \param __X
/// A 128-bit vector of [4 x i32] containing an intermediate result.
/// \param __Y
/// A 128-bit vector of [4 x i32] containing previous message values.
/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-256
/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha256msg2((__v4si)__X, (__v4si)__Y);
}
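A hedged sketch of the common way these SHA-256 schedule intrinsics compose: _mm_alignr_epi8 supplies the W[t-7..t-4] terms that neither SHA256MSG instruction covers. The helper name is illustrative; assumes -msha -mssse3.
#include <immintrin.h>
/* Produces W[16..19] from W[0..15]; the alignr extracts W[9..12]. */
static __m128i sha256_next_w(__m128i w0_3, __m128i w4_7, __m128i w8_11,
                             __m128i w12_15) {
  __m128i t = _mm_sha256msg1_epu32(w0_3, w4_7);
  t = _mm_add_epi32(t, _mm_alignr_epi8(w12_15, w8_11, 4));
  return _mm_sha256msg2_epu32(t, w12_15);
}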
+16
@@ -0,0 +1,16 @@
//===----- sifive_vector.h - SiFive Vector definitions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _SIFIVE_VECTOR_H_
#define _SIFIVE_VECTOR_H_
#include "riscv_vector.h"
#pragma clang riscv intrinsic sifive_vector
#endif //_SIFIVE_VECTOR_H_
+238
@@ -0,0 +1,238 @@
/*===-------------------- sm3intrin.h - SM3 intrinsics ---------------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __IMMINTRIN_H
#error "Never use <sm3intrin.h> directly; include <immintrin.h> instead."
#endif // __IMMINTRIN_H
#ifndef __SM3INTRIN_H
#define __SM3INTRIN_H
#define __DEFAULT_FN_ATTRS128 \
__attribute__((__always_inline__, __nodebug__, __target__("sm3"), \
__min_vector_width__(128)))
/// This intrinsic is one of the two SM3 message scheduling intrinsics. The
/// intrinsic performs an initial calculation for the next four SM3 message
/// words. The calculated results are stored in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_sm3msg1_epi32(__m128i __A, __m128i __B, __m128i __C)
/// \endcode
///
/// This intrinsic corresponds to the \c VSM3MSG1 instruction.
///
/// \param __A
/// A 128-bit vector of [4 x int].
/// \param __B
/// A 128-bit vector of [4 x int].
/// \param __C
/// A 128-bit vector of [4 x int].
/// \returns
/// A 128-bit vector of [4 x int].
///
/// \code{.operation}
/// DEFINE ROL32(dword, n) {
/// count := n % 32
/// dest := (dword << count) | (dword >> (32 - count))
/// RETURN dest
/// }
/// DEFINE P1(x) {
/// RETURN x ^ ROL32(x, 15) ^ ROL32(x, 23)
/// }
/// W[0] := __C.dword[0]
/// W[1] := __C.dword[1]
/// W[2] := __C.dword[2]
/// W[3] := __C.dword[3]
/// W[7] := __A.dword[0]
/// W[8] := __A.dword[1]
/// W[9] := __A.dword[2]
/// W[10] := __A.dword[3]
/// W[13] := __B.dword[0]
/// W[14] := __B.dword[1]
/// W[15] := __B.dword[2]
/// TMP0 := W[7] ^ W[0] ^ ROL32(W[13], 15)
/// TMP1 := W[8] ^ W[1] ^ ROL32(W[14], 15)
/// TMP2 := W[9] ^ W[2] ^ ROL32(W[15], 15)
/// TMP3 := W[10] ^ W[3]
/// dst.dword[0] := P1(TMP0)
/// dst.dword[1] := P1(TMP1)
/// dst.dword[2] := P1(TMP2)
/// dst.dword[3] := P1(TMP3)
/// dst[MAX:128] := 0
/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg1_epi32(__m128i __A,
__m128i __B,
__m128i __C) {
return (__m128i)__builtin_ia32_vsm3msg1((__v4su)__A, (__v4su)__B,
(__v4su)__C);
}
/// This intrinsic is one of the two SM3 message scheduling intrinsics. The
/// intrinsic performs the final calculation for the next four SM3 message
/// words. The calculated results are stored in \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_sm3msg2_epi32(__m128i __A, __m128i __B, __m128i __C)
/// \endcode
///
/// This intrinsic corresponds to the \c VSM3MSG2 instruction.
///
/// \param __A
/// A 128-bit vector of [4 x int].
/// \param __B
/// A 128-bit vector of [4 x int].
/// \param __C
/// A 128-bit vector of [4 x int].
/// \returns
/// A 128-bit vector of [4 x int].
///
/// \code{.operation}
/// DEFINE ROL32(dword, n) {
/// count := n % 32
/// dest := (dword << count) | (dword >> (32-count))
/// RETURN dest
/// }
/// WTMP[0] := __A.dword[0]
/// WTMP[1] := __A.dword[1]
/// WTMP[2] := __A.dword[2]
/// WTMP[3] := __A.dword[3]
/// W[3] := __B.dword[0]
/// W[4] := __B.dword[1]
/// W[5] := __B.dword[2]
/// W[6] := __B.dword[3]
/// W[10] := __C.dword[0]
/// W[11] := __C.dword[1]
/// W[12] := __C.dword[2]
/// W[13] := __C.dword[3]
/// W[16] := ROL32(W[3], 7) ^ W[10] ^ WTMP[0]
/// W[17] := ROL32(W[4], 7) ^ W[11] ^ WTMP[1]
/// W[18] := ROL32(W[5], 7) ^ W[12] ^ WTMP[2]
/// W[19] := ROL32(W[6], 7) ^ W[13] ^ WTMP[3]
/// W[19] := W[19] ^ ROL32(W[16], 6) ^ ROL32(W[16], 15) ^ ROL32(W[16], 30)
/// dst.dword[0] := W[16]
/// dst.dword[1] := W[17]
/// dst.dword[2] := W[18]
/// dst.dword[3] := W[19]
/// dst[MAX:128] := 0
/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg2_epi32(__m128i __A,
__m128i __B,
__m128i __C) {
return (__m128i)__builtin_ia32_vsm3msg2((__v4su)__A, (__v4su)__B,
(__v4su)__C);
}
/// This intrinsic performs two rounds of SM3 operation using an initial SM3
/// state (C, D, G, H) from \a __A, an initial SM3 state (A, B, E, F) from
/// \a __B, and pre-computed message words from \a __C. The (C, D, G, H)
/// values in \a __A are expected as the non-rotated variables from the
/// previous state; the instruction applies the rotations itself. The \a imm8
/// argument should contain the even round number for the first of the two
/// rounds computed by this instruction. The computation masks \a imm8 by
/// ANDing it with 0x3E so that only even round numbers from 0 through 62 are
/// used for this operation. The updated SM3 state (A, B, E, F) is stored in
/// \a dst.
///
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_sm3rnds2_epi32(__m128i __A, __m128i __B, __m128i __C, const int
/// imm8)
/// \endcode
///
/// This intrinsic corresponds to the \c VSM3RNDS2 instruction.
///
/// \param __A
/// A 128-bit vector of [4 x int].
/// \param __B
/// A 128-bit vector of [4 x int].
/// \param __C
/// A 128-bit vector of [4 x int].
/// \param imm8
/// An 8-bit constant integer.
/// \returns
/// A 128-bit vector of [4 x int].
///
/// \code{.operation}
/// DEFINE ROL32(dword, n) {
/// count := n % 32
/// dest := (dword << count) | (dword >> (32-count))
/// RETURN dest
/// }
/// DEFINE P0(dword) {
/// RETURN dword ^ ROL32(dword, 9) ^ ROL32(dword, 17)
/// }
/// DEFINE FF(x,y,z, round){
/// IF round < 16
/// RETURN (x ^ y ^ z)
/// ELSE
/// RETURN (x & y) | (x & z) | (y & z)
/// FI
/// }
/// DEFINE GG(x, y, z, round){
/// IF round < 16
/// RETURN (x ^ y ^ z)
/// ELSE
/// RETURN (x & y) | (~x & z)
/// FI
/// }
/// A[0] := __B.dword[3]
/// B[0] := __B.dword[2]
/// C[0] := __A.dword[3]
/// D[0] := __A.dword[2]
/// E[0] := __B.dword[1]
/// F[0] := __B.dword[0]
/// G[0] := __A.dword[1]
/// H[0] := __A.dword[0]
/// W[0] := __C.dword[0]
/// W[1] := __C.dword[1]
/// W[4] := __C.dword[2]
/// W[5] := __C.dword[3]
/// C[0] := ROL32(C[0], 9)
/// D[0] := ROL32(D[0], 9)
/// G[0] := ROL32(G[0], 19)
/// H[0] := ROL32(H[0], 19)
/// ROUND := imm8 & 0x3E
/// IF ROUND < 16
/// CONST := 0x79CC4519
/// ELSE
/// CONST := 0x7A879D8A
/// FI
/// CONST := ROL32(CONST,ROUND)
/// FOR i:= 0 to 1
/// S1 := ROL32((ROL32(A[i], 12) + E[i] + CONST), 7)
/// S2 := S1 ^ ROL32(A[i], 12)
/// T1 := FF(A[i], B[i], C[i], ROUND) + D[i] + S2 + (W[i] ^ W[i+4])
/// T2 := GG(E[i], F[i], G[i], ROUND) + H[i] + S1 + W[i]
/// D[i+1] := C[i]
/// C[i+1] := ROL32(B[i],9)
/// B[i+1] := A[i]
/// A[i+1] := T1
/// H[i+1] := G[i]
/// G[i+1] := ROL32(F[i], 19)
/// F[i+1] := E[i]
/// E[i+1] := P0(T2)
/// CONST := ROL32(CONST, 1)
/// ENDFOR
/// dst.dword[3] := A[2]
/// dst.dword[2] := B[2]
/// dst.dword[1] := E[2]
/// dst.dword[0] := F[2]
/// dst[MAX:128] := 0
/// \endcode
#define _mm_sm3rnds2_epi32(A, B, C, D) \
(__m128i) __builtin_ia32_vsm3rnds2((__v4su)A, (__v4su)B, (__v4su)C, (int)D)
#undef __DEFAULT_FN_ATTRS128
#endif // __SM3INTRIN_H
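A hedged usage sketch: the last argument of _mm_sm3rnds2_epi32 is the even round number, masked to 0..62 by the instruction as described above, and w holds W[0], W[1], W[4], W[5] in the layout the pseudocode shows. The helper is illustrative only; assumes -msm3 -mavx.
#include <immintrin.h>
/* Runs SM3 rounds 0 and 1 on the split state (cdgh, abef). */
static __m128i sm3_rounds_0_and_1(__m128i cdgh, __m128i abef, __m128i w) {
  return _mm_sm3rnds2_epi32(cdgh, abef, w, 0);
}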
+269
@@ -0,0 +1,269 @@
/*===--------------- sm4intrin.h - SM4 intrinsics -----------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __IMMINTRIN_H
#error "Never use <sm4intrin.h> directly; include <immintrin.h> instead."
#endif // __IMMINTRIN_H
#ifndef __SM4INTRIN_H
#define __SM4INTRIN_H
/// This intrinsic performs four rounds of SM4 key expansion. The intrinsic
/// operates on independent 128-bit lanes. The calculated results are
/// stored in \a dst.
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_sm4key4_epi32(__m128i __A, __m128i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VSM4KEY4 instruction.
///
/// \param __A
/// A 128-bit vector of [4 x int].
/// \param __B
/// A 128-bit vector of [4 x int].
/// \returns
/// A 128-bit vector of [4 x int].
///
/// \code{.operation}
/// DEFINE ROL32(dword, n) {
/// count := n % 32
/// dest := (dword << count) | (dword >> (32-count))
/// RETURN dest
/// }
/// DEFINE SBOX_BYTE(dword, i) {
/// RETURN sbox[dword.byte[i]]
/// }
/// DEFINE lower_t(dword) {
/// tmp.byte[0] := SBOX_BYTE(dword, 0)
/// tmp.byte[1] := SBOX_BYTE(dword, 1)
/// tmp.byte[2] := SBOX_BYTE(dword, 2)
/// tmp.byte[3] := SBOX_BYTE(dword, 3)
/// RETURN tmp
/// }
/// DEFINE L_KEY(dword) {
/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23)
/// }
/// DEFINE T_KEY(dword) {
/// RETURN L_KEY(lower_t(dword))
/// }
/// DEFINE F_KEY(X0, X1, X2, X3, round_key) {
/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key)
/// }
/// FOR i:= 0 to 0
/// P[0] := __B.xmm[i].dword[0]
/// P[1] := __B.xmm[i].dword[1]
/// P[2] := __B.xmm[i].dword[2]
/// P[3] := __B.xmm[i].dword[3]
/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
/// DEST.xmm[i].dword[0] := C[0]
/// DEST.xmm[i].dword[1] := C[1]
/// DEST.xmm[i].dword[2] := C[2]
/// DEST.xmm[i].dword[3] := C[3]
/// ENDFOR
/// DEST[MAX:128] := 0
/// \endcode
#define _mm_sm4key4_epi32(A, B) \
(__m128i) __builtin_ia32_vsm4key4128((__v4su)A, (__v4su)B)
/// This intrinsic performs four rounds of SM4 key expansion. The intrinsic
/// operates on independent 128-bit lanes. The calculated results are
/// stored in \a dst.
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_sm4key4_epi32(__m256i __A, __m256i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VSM4KEY4 instruction.
///
/// \param __A
/// A 256-bit vector of [8 x int].
/// \param __B
/// A 256-bit vector of [8 x int].
/// \returns
/// A 256-bit vector of [8 x int].
///
/// \code{.operation}
/// DEFINE ROL32(dword, n) {
/// count := n % 32
/// dest := (dword << count) | (dword >> (32-count))
/// RETURN dest
/// }
/// DEFINE SBOX_BYTE(dword, i) {
/// RETURN sbox[dword.byte[i]]
/// }
/// DEFINE lower_t(dword) {
/// tmp.byte[0] := SBOX_BYTE(dword, 0)
/// tmp.byte[1] := SBOX_BYTE(dword, 1)
/// tmp.byte[2] := SBOX_BYTE(dword, 2)
/// tmp.byte[3] := SBOX_BYTE(dword, 3)
/// RETURN tmp
/// }
/// DEFINE L_KEY(dword) {
/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23)
/// }
/// DEFINE T_KEY(dword) {
/// RETURN L_KEY(lower_t(dword))
/// }
/// DEFINE F_KEY(X0, X1, X2, X3, round_key) {
/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key)
/// }
/// FOR i:= 0 to 1
/// P[0] := __B.xmm[i].dword[0]
/// P[1] := __B.xmm[i].dword[1]
/// P[2] := __B.xmm[i].dword[2]
/// P[3] := __B.xmm[i].dword[3]
/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
/// DEST.xmm[i].dword[0] := C[0]
/// DEST.xmm[i].dword[1] := C[1]
/// DEST.xmm[i].dword[2] := C[2]
/// DEST.xmm[i].dword[3] := C[3]
/// ENDFOR
/// DEST[MAX:256] := 0
/// \endcode
#define _mm256_sm4key4_epi32(A, B) \
(__m256i) __builtin_ia32_vsm4key4256((__v8su)A, (__v8su)B)
/// This intrinsic performs four rounds of SM4 encryption. The intrinsic
/// operates on independent 128-bit lanes. The calculated results are
/// stored in \a dst.
/// \headerfile <immintrin.h>
///
/// \code
/// __m128i _mm_sm4rnds4_epi32(__m128i __A, __m128i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VSM4RNDS4 instruction.
///
/// \param __A
/// A 128-bit vector of [4 x int].
/// \param __B
/// A 128-bit vector of [4 x int].
/// \returns
/// A 128-bit vector of [4 x int].
///
/// \code{.operation}
/// DEFINE ROL32(dword, n) {
/// count := n % 32
/// dest := (dword << count) | (dword >> (32-count))
/// RETURN dest
/// }
/// DEFINE lower_t(dword) {
/// tmp.byte[0] := SBOX_BYTE(dword, 0)
/// tmp.byte[1] := SBOX_BYTE(dword, 1)
/// tmp.byte[2] := SBOX_BYTE(dword, 2)
/// tmp.byte[3] := SBOX_BYTE(dword, 3)
/// RETURN tmp
/// }
/// DEFINE L_RND(dword) {
/// tmp := dword
/// tmp := tmp ^ ROL32(dword, 2)
/// tmp := tmp ^ ROL32(dword, 10)
/// tmp := tmp ^ ROL32(dword, 18)
/// tmp := tmp ^ ROL32(dword, 24)
/// RETURN tmp
/// }
/// DEFINE T_RND(dword) {
/// RETURN L_RND(lower_t(dword))
/// }
/// DEFINE F_RND(X0, X1, X2, X3, round_key) {
/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key)
/// }
/// FOR i:= 0 to 0
/// P[0] := __B.xmm[i].dword[0]
/// P[1] := __B.xmm[i].dword[1]
/// P[2] := __B.xmm[i].dword[2]
/// P[3] := __B.xmm[i].dword[3]
/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
/// C[2] := F_RND(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
/// DEST.xmm[i].dword[0] := C[0]
/// DEST.xmm[i].dword[1] := C[1]
/// DEST.xmm[i].dword[2] := C[2]
/// DEST.xmm[i].dword[3] := C[3]
/// ENDFOR
/// DEST[MAX:128] := 0
/// \endcode
#define _mm_sm4rnds4_epi32(A, B) \
(__m128i) __builtin_ia32_vsm4rnds4128((__v4su)A, (__v4su)B)
/// This intrinsic performs four rounds of SM4 encryption. The intrinsic
/// operates on independent 128-bit lanes. The calculated results are
/// stored in \a dst.
/// \headerfile <immintrin.h>
///
/// \code
/// __m256i _mm256_sm4rnds4_epi32(__m256i __A, __m256i __B)
/// \endcode
///
/// This intrinsic corresponds to the \c VSM4RNDS4 instruction.
///
/// \param __A
/// A 256-bit vector of [8 x int].
/// \param __B
/// A 256-bit vector of [8 x int].
/// \returns
/// A 256-bit vector of [8 x int].
///
/// \code{.operation}
/// DEFINE ROL32(dword, n) {
/// count := n % 32
/// dest := (dword << count) | (dword >> (32-count))
/// RETURN dest
/// }
/// DEFINE lower_t(dword) {
/// tmp.byte[0] := SBOX_BYTE(dword, 0)
/// tmp.byte[1] := SBOX_BYTE(dword, 1)
/// tmp.byte[2] := SBOX_BYTE(dword, 2)
/// tmp.byte[3] := SBOX_BYTE(dword, 3)
/// RETURN tmp
/// }
/// DEFINE L_RND(dword) {
/// tmp := dword
/// tmp := tmp ^ ROL32(dword, 2)
/// tmp := tmp ^ ROL32(dword, 10)
/// tmp := tmp ^ ROL32(dword, 18)
/// tmp := tmp ^ ROL32(dword, 24)
/// RETURN tmp
/// }
/// DEFINE T_RND(dword) {
/// RETURN L_RND(lower_t(dword))
/// }
/// DEFINE F_RND(X0, X1, X2, X3, round_key) {
/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key)
/// }
/// FOR i:= 0 to 1
/// P[0] := __B.xmm[i].dword[0]
/// P[1] := __B.xmm[i].dword[1]
/// P[2] := __B.xmm[i].dword[2]
/// P[3] := __B.xmm[i].dword[3]
/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
/// C[2] := F_RND(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
/// DEST.xmm[i].dword[0] := C[0]
/// DEST.xmm[i].dword[1] := C[1]
/// DEST.xmm[i].dword[2] := C[2]
/// DEST.xmm[i].dword[3] := C[3]
/// ENDFOR
/// DEST[MAX:256] := 0
/// \endcode
#define _mm256_sm4rnds4_epi32(A, B) \
(__m256i) __builtin_ia32_vsm4rnds4256((__v8su)A, (__v8su)B)
#endif // __SM4INTRIN_H
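A hedged sketch of the operand roles implied by the pseudocode above: the second operand carries the data words (the P[] inputs) and the first carries the round keys, or the CK constants in the key-expansion case. Helper names are illustrative; assumes -msm4 -mavx.
#include <immintrin.h>
/* Key expansion: previous four round keys as the data operand, CK round
 * constants as the key operand, per the F_KEY pseudocode above. */
static __m128i sm4_next_keys(__m128i ck, __m128i prev_keys) {
  return _mm_sm4key4_epi32(ck, prev_keys);
}
/* Four encryption rounds on one 128-bit block. */
static __m128i sm4_four_rounds(__m128i round_keys, __m128i block) {
  return _mm_sm4rnds4_epi32(round_keys, block);
}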
+5
@@ -10,6 +10,10 @@
#ifndef __STDALIGN_H
#define __STDALIGN_H
/* FIXME: This is using the placeholder dates Clang produces for these macros
in C2x mode; switch to the correct values once they've been published. */
#if defined(__cplusplus) || \
(defined(__STDC_VERSION__) && __STDC_VERSION__ < 202000L)
#ifndef __cplusplus
#define alignas _Alignas
#define alignof _Alignof
@@ -17,5 +21,6 @@
#define __alignas_is_defined 1
#define __alignof_is_defined 1
#endif /* __STDC_VERSION__ */
#endif /* __STDALIGN_H */
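A hedged sketch of what the guard above provides: pre-C23 C code gets the C++-style spellings as macros over the _Alignas and _Alignof keywords (in C23 they become keywords themselves, which is why the block is skipped there).
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>
int main(void) {
  alignas(16) unsigned char buf[64];   /* expands to _Alignas(16) pre-C23 */
  printf("16-byte aligned: %d\n", (uintptr_t)buf % 16 == 0);
  printf("alignof(double) = %zu\n", alignof(double)); /* _Alignof(double) */
  return 0;
}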
+9 -2
@@ -45,9 +45,16 @@ extern "C" {
#define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE
/* 7.17.2 Initialization */
/* FIXME: This is using the placeholder dates Clang produces for these macros
in C2x mode; switch to the correct values once they've been published. */
#if (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202000L) || \
defined(__cplusplus)
/* ATOMIC_VAR_INIT was removed in C2x, but still remains in C++23. */
#define ATOMIC_VAR_INIT(value) (value)
#endif
#if ((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201710L && \
__STDC_VERSION__ < 202000L) || \
(defined(__cplusplus) && __cplusplus >= 202002L)) && \
!defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)
/* ATOMIC_VAR_INIT was deprecated in C17 and C++20. */
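A hedged sketch of what these guards mean for user code: ATOMIC_VAR_INIT still compiles in pre-C2x modes and in C++, but plain initialization has been valid since C11 and is the only spelling left in C2x.
#include <stdatomic.h>
static atomic_int legacy = ATOMIC_VAR_INIT(1); /* deprecated in C17/C++20 */
static atomic_int modern = 1;                  /* valid in every mode     */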
+5
@@ -103,6 +103,11 @@ using ::std::nullptr_t;
typedef typeof(nullptr) nullptr_t;
#endif /* defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L */
#if defined(__need_STDDEF_H_misc) && defined(__STDC_VERSION__) && \
__STDC_VERSION__ >= 202000L
#define unreachable() __builtin_unreachable()
#endif /* defined(__need_STDDEF_H_misc) && >= C23 */
#if defined(__need_STDDEF_H_misc)
#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
(defined(__cplusplus) && __cplusplus >= 201103L)
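A hedged sketch of the new unreachable() macro in use (C23 mode, e.g. -std=c2x with this Clang): it expands to __builtin_unreachable(), so the optimizer may assume the marked path never executes.
#include <stddef.h>
int sign_for_quadrant(int q) { /* caller guarantees q is 0..3 */
  switch (q) {
  case 0: case 1: return 1;
  case 2: case 3: return -1;
  default: unreachable(); /* undefined behavior if ever reached */
  }
}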
+132 -12
@@ -961,17 +961,17 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_popcnt(v128_t __a) {
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
uint32_t __b) {
return (v128_t)((__i8x16)__a << (__b & 0x7));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
uint32_t __b) {
return (v128_t)((__i8x16)__a >> (__b & 0x7));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
uint32_t __b) {
return (v128_t)((__u8x16)__a >> (__b & 0x7));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,
@@ -1047,17 +1047,17 @@ static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i16x8_bitmask(v128_t __a) {
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,
uint32_t __b) {
return (v128_t)((__i16x8)__a << (__b & 0xF));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a,
uint32_t __b) {
return (v128_t)((__i16x8)__a >> (__b & 0xF));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a,
uint32_t __b) {
return (v128_t)((__u16x8)__a >> (__b & 0xF));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,
@@ -1138,17 +1138,17 @@ static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i32x4_bitmask(v128_t __a) {
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,
uint32_t __b) {
return (v128_t)((__i32x4)__a << (__b & 0x1F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a,
uint32_t __b) {
return (v128_t)((__i32x4)__a >> (__b & 0x1F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a,
uint32_t __b) {
return (v128_t)((__u32x4)__a >> (__b & 0x1F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a,
@@ -1209,17 +1209,17 @@ static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i64x2_bitmask(v128_t __a) {
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,
uint32_t __b) {
return (v128_t)((__i64x2)__a << ((int64_t)__b & 0x3F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a,
uint32_t __b) {
return (v128_t)((__i64x2)__a >> ((int64_t)__b & 0x3F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a,
uint32_t __b) {
return (v128_t)((__u64x2)__a >> ((int64_t)__b & 0x3F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a,
@@ -1760,6 +1760,126 @@ wasm_u64x2_load_32x2(const void *__mem) {
__DEPRECATED_WASM_MACRO("wasm_v64x2_shuffle", "wasm_i64x2_shuffle") \
wasm_i64x2_shuffle(__a, __b, __c0, __c1)
// Relaxed SIMD intrinsics
#define __RELAXED_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("relaxed-simd"), \
__min_vector_width__(128)))
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_f32x4_relaxed_madd(v128_t __a, v128_t __b, v128_t __c) {
return (v128_t)__builtin_wasm_relaxed_madd_f32x4((__f32x4)__a, (__f32x4)__b,
(__f32x4)__c);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_f32x4_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c) {
return (v128_t)__builtin_wasm_relaxed_nmadd_f32x4((__f32x4)__a, (__f32x4)__b,
(__f32x4)__c);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_f64x2_relaxed_madd(v128_t __a, v128_t __b, v128_t __c) {
return (v128_t)__builtin_wasm_relaxed_madd_f64x2((__f64x2)__a, (__f64x2)__b,
(__f64x2)__c);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_f64x2_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c) {
return (v128_t)__builtin_wasm_relaxed_nmadd_f64x2((__f64x2)__a, (__f64x2)__b,
(__f64x2)__c);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_i8x16_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
return (v128_t)__builtin_wasm_relaxed_laneselect_i8x16(
(__i8x16)__a, (__i8x16)__b, (__i8x16)__m);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_i16x8_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
return (v128_t)__builtin_wasm_relaxed_laneselect_i16x8(
(__i16x8)__a, (__i16x8)__b, (__i16x8)__m);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_i32x4_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
return (v128_t)__builtin_wasm_relaxed_laneselect_i32x4(
(__i32x4)__a, (__i32x4)__b, (__i32x4)__m);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_i64x2_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
return (v128_t)__builtin_wasm_relaxed_laneselect_i64x2(
(__i64x2)__a, (__i64x2)__b, (__i64x2)__m);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_i8x16_relaxed_swizzle(v128_t __a, v128_t __s) {
return (v128_t)__builtin_wasm_relaxed_swizzle_i8x16((__i8x16)__a,
(__i8x16)__s);
}
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_min(v128_t __a,
v128_t __b) {
return (v128_t)__builtin_wasm_relaxed_min_f32x4((__f32x4)__a, (__f32x4)__b);
}
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_max(v128_t __a,
v128_t __b) {
return (v128_t)__builtin_wasm_relaxed_max_f32x4((__f32x4)__a, (__f32x4)__b);
}
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_min(v128_t __a,
v128_t __b) {
return (v128_t)__builtin_wasm_relaxed_min_f64x2((__f64x2)__a, (__f64x2)__b);
}
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_max(v128_t __a,
v128_t __b) {
return (v128_t)__builtin_wasm_relaxed_max_f64x2((__f64x2)__a, (__f64x2)__b);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_i32x4_relaxed_trunc_f32x4(v128_t __a) {
return (v128_t)__builtin_wasm_relaxed_trunc_s_i32x4_f32x4((__f32x4)__a);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_u32x4_relaxed_trunc_f32x4(v128_t __a) {
return (v128_t)__builtin_wasm_relaxed_trunc_u_i32x4_f32x4((__f32x4)__a);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_i32x4_relaxed_trunc_f64x2_zero(v128_t __a) {
return (v128_t)__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2((__f64x2)__a);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_u32x4_relaxed_trunc_f64x2_zero(v128_t __a) {
return (v128_t)__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2((__f64x2)__a);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_i16x8_relaxed_q15mulr(v128_t __a, v128_t __b) {
return (v128_t)__builtin_wasm_relaxed_q15mulr_s_i16x8((__i16x8)__a,
(__i16x8)__b);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_i16x8_relaxed_dot_i8x16_i7x16(v128_t __a, v128_t __b) {
return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8((__i8x16)__a,
(__i8x16)__b);
}
static __inline__ v128_t __RELAXED_FN_ATTRS
wasm_i32x4_relaxed_dot_i8x16_i7x16_add(v128_t __a, v128_t __b, v128_t __c) {
return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4(
(__i8x16)__a, (__i8x16)__b, (__i32x4)__c);
}
// Deprecated intrinsics
static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_swizzle")
wasm_v8x16_swizzle(v128_t __a, v128_t __b) {
return wasm_i8x16_swizzle(__a, __b);
}
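A hedged sketch of why the masking added in this file is semantics-preserving: core Wasm takes SIMD shift counts modulo the lane width in bits, so a count of 33 on 32-bit lanes shifts by 1. The helper name is illustrative; assumes a wasm target with -msimd128.
#include <wasm_simd128.h>
v128_t shift_mod_demo(v128_t v) {
  v128_t a = wasm_i32x4_shl(v, 1);
  v128_t b = wasm_i32x4_shl(v, 33); /* 33 & 0x1F == 1, same result */
  return wasm_v128_xor(a, b);       /* all lanes zero */
}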
+50
@@ -17,12 +17,62 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec")))
/// Performs a full or partial save of processor state to the memory at
/// \a __p. The exact state saved depends on the 64-bit mask \a __m and
/// processor control register \c XCR0.
///
/// \code{.operation}
/// mask[62:0] := __m[62:0] AND XCR0[62:0]
/// FOR i := 0 TO 62
/// IF mask[i] == 1
/// CASE (i) OF
/// 0: save X87 FPU state
/// 1: save SSE state
/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i]
/// FI
/// ENDFOR
/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0])
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c XSAVEC instruction.
///
/// \param __p
/// Pointer to the save area; must be 64-byte aligned.
/// \param __m
/// A 64-bit mask indicating what state should be saved.
static __inline__ void __DEFAULT_FN_ATTRS
_xsavec(void *__p, unsigned long long __m) {
__builtin_ia32_xsavec(__p, __m);
}
#ifdef __x86_64__
/// Performs a full or partial save of processor state to the memory at
/// \a __p. The exact state saved depends on the 64-bit mask \a __m and
/// processor control register \c XCR0.
///
/// \code{.operation}
/// mask[62:0] := __m[62:0] AND XCR0[62:0]
/// FOR i := 0 TO 62
/// IF mask[i] == 1
/// CASE (i) OF
/// 0: save X87 FPU state
/// 1: save SSE state
/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i]
/// FI
/// ENDFOR
/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0])
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c XSAVEC64 instruction.
///
/// \param __p
/// Pointer to the save area; must be 64-byte aligned.
/// \param __m
/// A 64-bit mask indicating what state should be saved.
static __inline__ void __DEFAULT_FN_ATTRS
_xsavec64(void *__p, unsigned long long __m) {
__builtin_ia32_xsavec64(__p, __m);