diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index 293459cc11c2f..3243e12e69999 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -5,7 +5,9 @@ use std::cmp::Ordering;
 use cranelift_module::*;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_middle::mir::interpret::{AllocId, GlobalAlloc, Scalar, read_target_uint};
+use rustc_middle::mir::interpret::{
+    AllocId, GlobalAlloc, PointerArithmetic, Scalar, read_target_uint,
+};
 use rustc_middle::ty::{ExistentialTraitRef, ScalarInt};
 
 use crate::prelude::*;
@@ -138,8 +140,11 @@ pub(crate) fn codegen_const_value<'tcx>(
                 let base_addr = match fx.tcx.global_alloc(alloc_id) {
                     GlobalAlloc::Memory(alloc) => {
                         if alloc.inner().len() == 0 {
-                            assert_eq!(offset, Size::ZERO);
-                            fx.bcx.ins().iconst(fx.pointer_type, alloc.inner().align.bytes() as i64)
+                            let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
+                            fx.bcx.ins().iconst(
+                                fx.pointer_type,
+                                fx.tcx.truncate_to_target_usize(val) as i64,
+                            )
                         } else {
                             let data_id = data_id_for_alloc_id(
                                 &mut fx.constants_cx,
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
index 28848ca61845c..7c2969e587186 100644
--- a/compiler/rustc_codegen_gcc/src/common.rs
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -5,7 +5,7 @@ use rustc_codegen_ssa::traits::{
     BaseTypeCodegenMethods, ConstCodegenMethods, MiscCodegenMethods, StaticCodegenMethods,
 };
 use rustc_middle::mir::Mutability;
-use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
+use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar};
 use rustc_middle::ty::layout::LayoutOf;
 
 use crate::context::CodegenCx;
@@ -247,8 +247,8 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> {
                         // This avoids generating a zero-sized constant value and actually needing a
                         // real address at runtime.
                         if alloc.inner().len() == 0 {
-                            assert_eq!(offset.bytes(), 0);
-                            let val = self.const_usize(alloc.inner().align.bytes());
+                            let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
+                            let val = self.const_usize(self.tcx.truncate_to_target_usize(val));
                             return if matches!(layout.primitive(), Pointer(_)) {
                                 self.context.new_cast(None, val, ty)
                             } else {
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 175fc8535ac3f..b0cf9925019d2 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -12,7 +12,7 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_hashes::Hash128;
 use rustc_hir::def_id::DefId;
 use rustc_middle::bug;
-use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
+use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar};
 use rustc_middle::ty::TyCtxt;
 use rustc_session::cstore::DllImport;
 use tracing::debug;
@@ -281,8 +281,8 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
                         // This avoids generating a zero-sized constant value and actually needing a
                         // real address at runtime.
                        if alloc.inner().len() == 0 {
-                            assert_eq!(offset.bytes(), 0);
-                            let llval = self.const_usize(alloc.inner().align.bytes());
+                            let val = alloc.inner().align.bytes().wrapping_add(offset.bytes());
+                            let llval = self.const_usize(self.tcx.truncate_to_target_usize(val));
                             return if matches!(layout.primitive(), Pointer(_)) {
                                 unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
                             } else {
diff --git a/tests/ui/consts/zst_no_llvm_alloc.rs b/tests/ui/consts/zst_no_llvm_alloc.rs
index 1e92e3bbd4c1b..cbfd839dc42fd 100644
--- a/tests/ui/consts/zst_no_llvm_alloc.rs
+++ b/tests/ui/consts/zst_no_llvm_alloc.rs
@@ -1,10 +1,21 @@
 //@ run-pass
 
+// We need some non-1 alignment to test that the compiler uses the alignment of the type.
 #[repr(align(4))]
 struct Foo;
 
 static FOO: Foo = Foo;
 
+// This tests for a regression of https://github.com/rust-lang/rust/issues/147516
+//
+// The compiler will codegen `&Zst` without creating a real allocation, just a properly aligned
+// `usize` (i.e., ptr::dangling). However, code can add an arbitrary offset to that base
+// address. We confirm here that we correctly codegen that offset combined with the necessary
+// alignment of the base: `()` is a 1-aligned ZST and `Foo` is a 4-aligned ZST.
+const A: *const () = (&() as *const ()).wrapping_byte_add(2);
+const B: *const () = (&Foo as *const _ as *const ()).wrapping_byte_add(usize::MAX);
+const C: *const () = (&Foo as *const _ as *const ()).wrapping_byte_add(2);
+
 fn main() {
     // There's no stable guarantee that these are true.
     // However, we want them to be true so that our LLVM IR and runtime are a bit faster:
@@ -15,6 +26,13 @@ fn main() {
     let x: &'static Foo = &Foo;
     assert_eq!(x as *const Foo as usize, 4);
 
+    // * A: a 1-aligned ZST (1-ZST) is placed at 0x1; offsetting that by 2 gives 3.
+    // * B: Foo is a 4-aligned ZST, so it is placed at 0x4; +usize::MAX wraps (same bit pattern as -1) to 3.
+    // * C: Foo is a 4-aligned ZST, so it is placed at 0x4; +2 = 6.
+    assert_eq!(A.addr(), 3);
+    assert_eq!(B.addr(), 3);
+    assert_eq!(C.addr(), 6);
+
     // The exact addresses returned by these library functions are not necessarily stable guarantees
     // but for now we assert that we're still matching.
     #[allow(dangling_pointers_from_temporaries)]
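
All three backends now compute the address of a pointer into a zero-sized allocation the same way: start from the allocation's alignment, fold in the pointer's offset with a wrapping add, and truncate the result to the target's pointer width. The standalone sketch below (not compiler code; `dangling_addr` and its parameters are illustrative stand-ins for `align.bytes()`, `offset.bytes()`, and `tcx.truncate_to_target_usize`) reproduces that arithmetic and checks it against the addresses asserted in the updated test, assuming a 64-bit target.

```rust
// Sketch of the shared backend arithmetic; the names here are hypothetical, not rustc APIs.
fn dangling_addr(align_bytes: u64, offset_bytes: u64, pointer_width_bits: u32) -> u64 {
    // Mirrors `alloc.inner().align.bytes().wrapping_add(offset.bytes())` from the diff.
    let val = align_bytes.wrapping_add(offset_bytes);
    // Stand-in for `truncate_to_target_usize`: keep only the target's pointer-width bits.
    if pointer_width_bits >= 64 { val } else { val & ((1u64 << pointer_width_bits) - 1) }
}

fn main() {
    // On a 64-bit target these match the assertions added to zst_no_llvm_alloc.rs:
    assert_eq!(dangling_addr(1, 2, 64), 3); // A: `&()` is 1-aligned, +2
    assert_eq!(dangling_addr(4, u64::MAX, 64), 3); // B: `&Foo` is 4-aligned, +usize::MAX wraps like -1
    assert_eq!(dangling_addr(4, 2, 64), 6); // C: `&Foo` is 4-aligned, +2
    println!("all dangling-pointer addresses match");
}
```

Before this change, the removed `assert_eq!(offset, Size::ZERO)` / `assert_eq!(offset.bytes(), 0)` checks meant that applying a non-zero offset to such a pointer, as the new `wrapping_byte_add` constants in the test do, would trip an assertion inside the compiler; that is the regression referenced by the issue link in the test.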