Merge pull request #2048 from sayantn/movrs

Add `movrs` intrinsics
This commit is contained in:
Folkert de Vries
2026-03-06 16:50:55 +00:00
committed by GitHub
6 changed files with 129 additions and 2 deletions
+2 -1
View File
@@ -39,7 +39,8 @@
const_trait_impl,
const_cmp,
const_eval_select,
maybe_uninit_as_bytes
maybe_uninit_as_bytes,
movrs_target_feature
)]
#![cfg_attr(test, feature(test, abi_vectorcall, stdarch_internal))]
#![deny(clippy::missing_inline_in_public_items)]
@@ -774,3 +774,7 @@ pub(crate) const fn $as_from(self) -> $from {
mod kl;
#[stable(feature = "keylocker_x86", since = "1.89.0")]
pub use self::kl::*;
mod movrs;
#[unstable(feature = "movrs_target_feature", issue = "137976")]
pub use self::movrs::*;
@@ -0,0 +1,23 @@
//! Read-shared move intrinsics
#[cfg(test)]
use stdarch_test::assert_instr;
// Raw binding to the LLVM intrinsic that lowers to the MOVRS prefetch
// instruction. Internal only; callers use the safe `_m_prefetchrs` wrapper.
unsafe extern "unadjusted" {
    #[link_name = "llvm.x86.prefetchrs"]
    fn prefetchrs(p: *const u8);
}
/// Prefetches the cache line that contains address `p`, with an indication that the source memory
/// location is likely to become read-shared by multiple processors, i.e., read in the future by at
/// least one other processor before it is written, assuming it is ever written in the future.
///
/// Note: this intrinsic is safe to use even though it takes a raw pointer argument. In general, this
/// cannot change the behavior of the program, including not trapping on invalid pointers.
#[inline]
#[target_feature(enable = "movrs")]
// The MOVRS prefetch form assembles as PREFETCHRST2, so the codegen test
// asserts that exact opcode rather than a plain `prefetch`.
#[cfg_attr(all(test, not(target_vendor = "apple")), assert_instr(prefetchrst2))]
#[unstable(feature = "movrs_target_feature", issue = "137976")]
pub fn _m_prefetchrs(p: *const u8) {
    // SAFETY: a prefetch is only a hint — per the doc comment above it never
    // traps, even on invalid pointers, so exposing a safe fn is sound.
    unsafe { prefetchrs(p) }
}
@@ -81,3 +81,7 @@
mod amx;
#[unstable(feature = "x86_amx_intrinsics", issue = "126622")]
pub use self::amx::*;
mod movrs;
#[unstable(feature = "movrs_target_feature", issue = "137976")]
pub use self::movrs::*;
@@ -0,0 +1,94 @@
//! Read-shared Move instructions
#[cfg(test)]
use stdarch_test::assert_instr;
// Raw bindings to the LLVM intrinsics that lower to the MOVRS load
// instruction at each operand width (qi/hi/si/di = 8/16/32/64-bit).
// Internal only; callers use the `_movrs_i*` wrappers below.
unsafe extern "unadjusted" {
    #[link_name = "llvm.x86.movrsqi"]
    fn movrsqi(src: *const i8) -> i8;
    #[link_name = "llvm.x86.movrshi"]
    fn movrshi(src: *const i16) -> i16;
    #[link_name = "llvm.x86.movrssi"]
    fn movrssi(src: *const i32) -> i32;
    #[link_name = "llvm.x86.movrsdi"]
    fn movrsdi(src: *const i64) -> i64;
}
/// Moves a byte from the source to the destination, with an indication that the source memory
/// location is likely to become read-shared by multiple processors, i.e., read in the future by at
/// least one other processor before it is written, assuming it is ever written in the future.
///
/// # Safety
///
/// `src` must be valid for a 1-byte read: the underlying `movrs` instruction
/// performs a load from it, so the usual raw-pointer-read rules apply.
#[inline]
#[target_feature(enable = "movrs")]
#[cfg_attr(all(test, not(target_vendor = "apple")), assert_instr(movrs))]
#[unstable(feature = "movrs_target_feature", issue = "137976")]
pub unsafe fn _movrs_i8(src: *const i8) -> i8 {
    movrsqi(src)
}
/// Moves a 16-bit word from the source to the destination, with an indication that the source memory
/// location is likely to become read-shared by multiple processors, i.e., read in the future by at
/// least one other processor before it is written, assuming it is ever written in the future.
///
/// # Safety
///
/// `src` must be valid for a 2-byte read: the underlying `movrs` instruction
/// performs a load from it, so the usual raw-pointer-read rules apply.
#[inline]
#[target_feature(enable = "movrs")]
#[cfg_attr(all(test, not(target_vendor = "apple")), assert_instr(movrs))]
#[unstable(feature = "movrs_target_feature", issue = "137976")]
pub unsafe fn _movrs_i16(src: *const i16) -> i16 {
    movrshi(src)
}
/// Moves a 32-bit doubleword from the source to the destination, with an indication that the source
/// memory location is likely to become read-shared by multiple processors, i.e., read in the future
/// by at least one other processor before it is written, assuming it is ever written in the future.
///
/// # Safety
///
/// `src` must be valid for a 4-byte read: the underlying `movrs` instruction
/// performs a load from it, so the usual raw-pointer-read rules apply.
#[inline]
#[target_feature(enable = "movrs")]
#[cfg_attr(all(test, not(target_vendor = "apple")), assert_instr(movrs))]
#[unstable(feature = "movrs_target_feature", issue = "137976")]
pub unsafe fn _movrs_i32(src: *const i32) -> i32 {
    movrssi(src)
}
/// Moves a 64-bit quadword from the source to the destination, with an indication that the source
/// memory location is likely to become read-shared by multiple processors, i.e., read in the future
/// by at least one other processor before it is written, assuming it is ever written in the future.
///
/// # Safety
///
/// `src` must be valid for an 8-byte read: the underlying `movrs` instruction
/// performs a load from it, so the usual raw-pointer-read rules apply.
#[inline]
#[target_feature(enable = "movrs")]
#[cfg_attr(all(test, not(target_vendor = "apple")), assert_instr(movrs))]
#[unstable(feature = "movrs_target_feature", issue = "137976")]
pub unsafe fn _movrs_i64(src: *const i64) -> i64 {
    movrsdi(src)
}
#[cfg(test)]
mod tests {
    use super::*;
    use stdarch_test::simd_test;

    // Each test round-trips a known value through a MOVRS load at one
    // operand width and checks the loaded value matches the original.

    #[simd_test(enable = "movrs")]
    fn test_movrs_i8() {
        let src: i8 = 42;
        let loaded = unsafe { _movrs_i8(&src) };
        assert_eq!(loaded, src);
    }

    #[simd_test(enable = "movrs")]
    fn test_movrs_i16() {
        let src: i16 = 42;
        let loaded = unsafe { _movrs_i16(&src) };
        assert_eq!(loaded, src);
    }

    #[simd_test(enable = "movrs")]
    fn test_movrs_i32() {
        let src: i32 = 42;
        let loaded = unsafe { _movrs_i32(&src) };
        assert_eq!(loaded, src);
    }

    #[simd_test(enable = "movrs")]
    fn test_movrs_i64() {
        let src: i64 = 42;
        let loaded = unsafe { _movrs_i64(&src) };
        assert_eq!(loaded, src);
    }
}
@@ -211,6 +211,7 @@ fn verify_all_signatures() {
"_rdseed64_step",
// Prefetch
"_mm_prefetch",
"_m_prefetchrs",
// CMPXCHG
"cmpxchg16b",
// Undefined
@@ -305,7 +306,7 @@ fn verify_all_signatures() {
}
// FIXME: these have not been added to Intrinsics Guide yet
if ["amx-avx512", "amx-fp8", "amx-movrs", "amx-tf32"]
if ["amx-avx512", "amx-fp8", "amx-movrs", "amx-tf32", "movrs"]
.iter()
.any(|f| feature.contains(f))
{