Fix ICE on offsetted ZST pointer

A grep for `const_usize.*align` found the same code copied into
rustc_codegen_gcc, but I don't see other cases where we get this wrong.
Author: Mark Rousskov
Date: 2025-10-10 21:29:29 -04:00
Commit: a8c79b876b (parent: ff6dc928c5)
4 changed files with 32 additions and 9 deletions
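The change is the same in all three backends: a pointer into a zero-sized allocation is emitted as a plain integer whose base is the allocation's alignment, and any offset applied to the pointer is now folded in instead of tripping an `assert_eq!(offset, Size::ZERO)`. A minimal standalone sketch of that arithmetic (the function and its parameters are illustrative; only `truncate_to_target_usize`, available via the `PointerArithmetic` import added below, names a real compiler helper):

```rust
// Illustrative sketch, not compiler code.
// Base address of a ZST allocation = its alignment (a dangling but well-aligned
// address); an offsetted pointer is align + offset, wrapped and then reduced to
// the target's pointer width (what tcx.truncate_to_target_usize does).
fn zst_pointer_value(align_bytes: u64, offset_bytes: u64, pointer_width_bits: u32) -> u64 {
    let val = align_bytes.wrapping_add(offset_bytes);
    if pointer_width_bits >= 64 { val } else { val & ((1u64 << pointer_width_bits) - 1) }
}

fn main() {
    // The cases exercised by the test added in this commit (64-bit target):
    assert_eq!(zst_pointer_value(1, 2, 64), 3); // &() offset by 2
    assert_eq!(zst_pointer_value(4, 2, 64), 6); // &Foo (align 4) offset by 2
    assert_eq!(zst_pointer_value(4, u64::MAX, 64), 3); // &Foo offset by usize::MAX wraps around
}
```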
@@ -5,7 +5,9 @@
use cranelift_module::*;
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_middle::mir::interpret::{AllocId, GlobalAlloc, Scalar, read_target_uint};
+use rustc_middle::mir::interpret::{
+    AllocId, GlobalAlloc, PointerArithmetic, Scalar, read_target_uint,
+};
use rustc_middle::ty::{ExistentialTraitRef, ScalarInt};
use crate::prelude::*;
@@ -138,8 +140,11 @@ pub(crate) fn codegen_const_value<'tcx>(
let base_addr = match fx.tcx.global_alloc(alloc_id) {
GlobalAlloc::Memory(alloc) => {
if alloc.inner().len() == 0 {
-assert_eq!(offset, Size::ZERO);
-fx.bcx.ins().iconst(fx.pointer_type, alloc.inner().align.bytes() as i64)
+let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
+fx.bcx.ins().iconst(
+    fx.pointer_type,
+    fx.tcx.truncate_to_target_usize(val) as i64,
+)
} else {
let data_id = data_id_for_alloc_id(
&mut fx.constants_cx,
+3 -3
@@ -5,7 +5,7 @@
BaseTypeCodegenMethods, ConstCodegenMethods, MiscCodegenMethods, StaticCodegenMethods,
};
use rustc_middle::mir::Mutability;
-use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
+use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar};
use rustc_middle::ty::layout::LayoutOf;
use crate::context::CodegenCx;
@@ -247,8 +247,8 @@ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) ->
// This avoids generating a zero-sized constant value and actually needing a
// real address at runtime.
if alloc.inner().len() == 0 {
-assert_eq!(offset.bytes(), 0);
-let val = self.const_usize(alloc.inner().align.bytes());
+let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
+let val = self.const_usize(self.tcx.truncate_to_target_usize(val));
return if matches!(layout.primitive(), Pointer(_)) {
self.context.new_cast(None, val, ty)
} else {
+3 -3
@@ -12,7 +12,7 @@
use rustc_hashes::Hash128;
use rustc_hir::def_id::DefId;
use rustc_middle::bug;
-use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
+use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar};
use rustc_middle::ty::TyCtxt;
use rustc_session::cstore::DllImport;
use tracing::debug;
@@ -281,8 +281,8 @@ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) ->
// This avoids generating a zero-sized constant value and actually needing a
// real address at runtime.
if alloc.inner().len() == 0 {
-assert_eq!(offset.bytes(), 0);
-let llval = self.const_usize(alloc.inner().align.bytes());
+let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
+let llval = self.const_usize(self.tcx.truncate_to_target_usize(val));
return if matches!(layout.primitive(), Pointer(_)) {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
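The cg_gcc and cg_llvm hunks above carry the same two-line change. Why it is `wrapping_add` followed by `truncate_to_target_usize` rather than a plain addition is easiest to see with concrete numbers; a small illustrative check (not compiler code), assuming a 32-bit target whose offsets still arrive as `u64`:

```rust
// Illustrative only: why the fix uses wrapping_add + truncate_to_target_usize.
fn main() {
    let align: u64 = 4;
    // usize::MAX on a 32-bit target, carried as u64 inside the compiler.
    let offset: u64 = u32::MAX as u64;

    // The u64 sum exceeds the 32-bit address range...
    let wrapped = align.wrapping_add(offset);
    assert_eq!(wrapped, 0x1_0000_0003);
    // ...so it must be reduced to the target pointer width, which is what
    // truncate_to_target_usize does; 3 is the address a 32-bit target computes.
    assert_eq!(wrapped & u32::MAX as u64, 3);

    // On a 64-bit target the offset can be u64::MAX itself, where a plain `+`
    // would overflow (and panic in a debug-assertions build); wrapping_add does not.
    assert_eq!(align.wrapping_add(u64::MAX), 3);
}
```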
+18
@@ -1,10 +1,21 @@
//@ run-pass
+// We need some non-1 alignment to test we use the alignment of the type in the compiler.
#[repr(align(4))]
struct Foo;
static FOO: Foo = Foo;
+// This tests for regression of https://github.com/rust-lang/rust/issues/147516
+//
+// The compiler will codegen `&Zst` without creating a real allocation, just a properly aligned
+// `usize` (i.e., ptr::dangling). However, code can add an arbitrary offset from that base
+// allocation. We confirm here that we correctly codegen that offset combined with the necessary
+// alignment of the base &() as a 1-ZST and &Foo as a 4-ZST.
+const A: *const () = (&() as *const ()).wrapping_byte_add(2);
+const B: *const () = (&Foo as *const _ as *const ()).wrapping_byte_add(usize::MAX);
+const C: *const () = (&Foo as *const _ as *const ()).wrapping_byte_add(2);
fn main() {
// There's no stable guarantee that these are true.
// However, we want them to be true so that our LLVM IR and runtime are a bit faster:
@@ -15,6 +26,13 @@ fn main() {
let x: &'static Foo = &Foo;
assert_eq!(x as *const Foo as usize, 4);
+// * A 1-aligned ZST (1-ZST) is placed at 0x1. Then offsetting that by 2 results in 3.
+// * Foo is a 4-aligned ZST, so is placed at 0x4. +2 = 6
+// * Foo is a 4-aligned ZST, so is placed at 0x4. +usize::MAX = -1 (same bit pattern) = 3
+assert_eq!(A.addr(), 3);
+assert_eq!(B.addr(), 3);
+assert_eq!(C.addr(), 6);
// The exact addresses returned by these library functions are not necessarily stable guarantees
// but for now we assert that we're still matching.
#[allow(dangling_pointers_from_temporaries)]