From fa53de656ec9318d9462dcadcfd851685c3dcaf1 Mon Sep 17 00:00:00 2001 From: Martin Nordholts Date: Sat, 27 Sep 2025 19:40:48 +0200 Subject: [PATCH 1/3] tests: Remove ignore-android directive for fixed issue --- tests/ui/test-attrs/test-panic-abort-nocapture.rs | 1 - tests/ui/test-attrs/test-panic-abort-nocapture.run.stderr | 4 ++-- tests/ui/test-attrs/test-panic-abort.rs | 1 - tests/ui/test-attrs/test-panic-abort.run.stdout | 2 +- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/ui/test-attrs/test-panic-abort-nocapture.rs b/tests/ui/test-attrs/test-panic-abort-nocapture.rs index 6a1025ea087c0..7c78d432fa087 100644 --- a/tests/ui/test-attrs/test-panic-abort-nocapture.rs +++ b/tests/ui/test-attrs/test-panic-abort-nocapture.rs @@ -6,7 +6,6 @@ //@ exec-env:RUST_BACKTRACE=0 //@ normalize-stdout: "finished in \d+\.\d+s" -> "finished in $$TIME" -//@ ignore-android #120567 //@ needs-subprocess #![cfg(test)] diff --git a/tests/ui/test-attrs/test-panic-abort-nocapture.run.stderr b/tests/ui/test-attrs/test-panic-abort-nocapture.run.stderr index 8d7c62f8ec700..d8f65a78261f8 100644 --- a/tests/ui/test-attrs/test-panic-abort-nocapture.run.stderr +++ b/tests/ui/test-attrs/test-panic-abort-nocapture.run.stderr @@ -1,11 +1,11 @@ -thread 'main' ($TID) panicked at $DIR/test-panic-abort-nocapture.rs:32:5: +thread 'main' ($TID) panicked at $DIR/test-panic-abort-nocapture.rs:31:5: assertion `left == right` failed left: 2 right: 4 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace -thread 'main' ($TID) panicked at $DIR/test-panic-abort-nocapture.rs:26:5: +thread 'main' ($TID) panicked at $DIR/test-panic-abort-nocapture.rs:25:5: assertion `left == right` failed left: 2 right: 4 diff --git a/tests/ui/test-attrs/test-panic-abort.rs b/tests/ui/test-attrs/test-panic-abort.rs index 6c9b641fb6fba..13a30223399e5 100644 --- a/tests/ui/test-attrs/test-panic-abort.rs +++ b/tests/ui/test-attrs/test-panic-abort.rs @@ -6,7 +6,6 @@ //@ exec-env:RUST_BACKTRACE=0 //@ normalize-stdout: "finished in \d+\.\d+s" -> "finished in $$TIME" -//@ ignore-android #120567 //@ needs-subprocess #![cfg(test)] diff --git a/tests/ui/test-attrs/test-panic-abort.run.stdout b/tests/ui/test-attrs/test-panic-abort.run.stdout index 4d65c05b94402..ca247f7da4180 100644 --- a/tests/ui/test-attrs/test-panic-abort.run.stdout +++ b/tests/ui/test-attrs/test-panic-abort.run.stdout @@ -18,7 +18,7 @@ testing123 ---- it_fails stderr ---- testing321 -thread 'main' ($TID) panicked at $DIR/test-panic-abort.rs:37:5: +thread 'main' ($TID) panicked at $DIR/test-panic-abort.rs:36:5: assertion `left == right` failed left: 2 right: 5 From b3f3e36c72952736be5b9e0360ee5b86148f2c29 Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Sat, 27 Sep 2025 20:00:54 -0700 Subject: [PATCH 2/3] compiler: remove AbiAlign inside TargetDataLayout This maintains AbiAlign usage in public API and most of the compiler, but direct access of these fields is now in terms of Align only. 
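As a rough illustration of the pattern, here is a minimal, self-contained sketch. The types below (DataLayoutSketch and the cut-down Align/AbiAlign) are hypothetical stand-ins, not the actual rustc_abi items: the field now stores a bare Align, and AbiAlign is rebuilt only at the public accessor, mirroring what pointer_align() does in this patch.

    // Simplified stand-ins, not the real rustc_abi definitions.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct Align {
        pow2: u8,
    }

    impl Align {
        fn from_bytes(bytes: u64) -> Align {
            // Assumes a power-of-two byte count, which is all a sketch needs.
            Align { pow2: bytes.trailing_zeros() as u8 }
        }
        fn bytes(self) -> u64 {
            1u64 << self.pow2
        }
    }

    // Wrapper kept only at the API boundary.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct AbiAlign {
        abi: Align,
    }

    impl AbiAlign {
        fn new(abi: Align) -> AbiAlign {
            AbiAlign { abi }
        }
    }

    // Hypothetical cut-down layout type standing in for TargetDataLayout.
    struct DataLayoutSketch {
        // After this patch the field is a plain Align ...
        i8_align: Align,
    }

    impl DataLayoutSketch {
        // ... and callers that still want the wrapper get it rebuilt on the
        // way out, as the public pointer_align() accessor does in the patch.
        fn i8_abi_align(&self) -> AbiAlign {
            AbiAlign::new(self.i8_align)
        }
    }

    fn main() {
        let dl = DataLayoutSketch { i8_align: Align::from_bytes(1) };
        assert_eq!(dl.i8_abi_align().abi.bytes(), 1);
        println!("i8 align = {} byte(s)", dl.i8_align.bytes());
    }

The real patch applies the same shape to the integer, float, aggregate and vector alignment fields, and to pointer_align()/pointer_align_in().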
--- compiler/rustc_abi/src/callconv/reg.rs | 22 ++--- compiler/rustc_abi/src/layout.rs | 65 +++++++------- compiler/rustc_abi/src/layout/simple.rs | 27 +++--- compiler/rustc_abi/src/lib.rs | 85 +++++++++---------- compiler/rustc_codegen_llvm/src/consts.rs | 4 +- compiler/rustc_codegen_llvm/src/intrinsic.rs | 2 +- compiler/rustc_codegen_llvm/src/va_arg.rs | 8 +- compiler/rustc_session/src/config/cfg.rs | 10 +-- compiler/rustc_target/src/callconv/mips.rs | 2 +- compiler/rustc_target/src/callconv/mips64.rs | 4 +- compiler/rustc_target/src/callconv/mod.rs | 2 +- compiler/rustc_target/src/callconv/sparc.rs | 2 +- compiler/rustc_target/src/callconv/sparc64.rs | 2 +- 13 files changed, 115 insertions(+), 120 deletions(-) diff --git a/compiler/rustc_abi/src/callconv/reg.rs b/compiler/rustc_abi/src/callconv/reg.rs index 8cf140dbaad4e..66c8056d0c2af 100644 --- a/compiler/rustc_abi/src/callconv/reg.rs +++ b/compiler/rustc_abi/src/callconv/reg.rs @@ -42,22 +42,22 @@ impl Reg { let dl = cx.data_layout(); match self.kind { RegKind::Integer => match self.size.bits() { - 1 => dl.i1_align.abi, - 2..=8 => dl.i8_align.abi, - 9..=16 => dl.i16_align.abi, - 17..=32 => dl.i32_align.abi, - 33..=64 => dl.i64_align.abi, - 65..=128 => dl.i128_align.abi, + 1 => dl.i1_align, + 2..=8 => dl.i8_align, + 9..=16 => dl.i16_align, + 17..=32 => dl.i32_align, + 33..=64 => dl.i64_align, + 65..=128 => dl.i128_align, _ => panic!("unsupported integer: {self:?}"), }, RegKind::Float => match self.size.bits() { - 16 => dl.f16_align.abi, - 32 => dl.f32_align.abi, - 64 => dl.f64_align.abi, - 128 => dl.f128_align.abi, + 16 => dl.f16_align, + 32 => dl.f32_align, + 64 => dl.f64_align, + 128 => dl.f128_align, _ => panic!("unsupported float: {self:?}"), }, - RegKind::Vector => dl.llvmlike_vector_align(self.size).abi, + RegKind::Vector => dl.llvmlike_vector_align(self.size), } } } diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index 5004d0c80220f..09b4322e299b8 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -174,11 +174,11 @@ impl LayoutCalculator { // Non-power-of-two vectors have padding up to the next power-of-two. // If we're a packed repr, remove the padding while keeping the alignment as close // to a vector as possible. - (BackendRepr::Memory { sized: true }, AbiAlign { abi: Align::max_aligned_factor(size) }) + (BackendRepr::Memory { sized: true }, Align::max_aligned_factor(size)) } else { (BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size)) }; - let size = size.align_to(align.abi); + let size = size.align_to(align); Ok(LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, @@ -190,7 +190,7 @@ impl LayoutCalculator { largest_niche: elt.largest_niche, uninhabited: false, size, - align, + align: AbiAlign::new(align), max_repr_align: None, unadjusted_abi_align: elt.align.abi, randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)), @@ -388,7 +388,7 @@ impl LayoutCalculator { return Err(LayoutCalculatorError::UnexpectedUnsized(*field)); } - align = align.max(field.align); + align = align.max(field.align.abi); max_repr_align = max_repr_align.max(field.max_repr_align); size = cmp::max(size, field.size); @@ -423,13 +423,13 @@ impl LayoutCalculator { } if let Some(pack) = repr.pack { - align = align.min(AbiAlign::new(pack)); + align = align.min(pack); } // The unadjusted ABI alignment does not include repr(align), but does include repr(pack). // See documentation on `LayoutData::unadjusted_abi_align`. 
- let unadjusted_abi_align = align.abi; + let unadjusted_abi_align = align; if let Some(repr_align) = repr.align { - align = align.max(AbiAlign::new(repr_align)); + align = align.max(repr_align); } // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate. let align = align; @@ -441,14 +441,12 @@ impl LayoutCalculator { Ok(Some((repr, _))) => match repr { // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _) - if repr.scalar_align(dl).unwrap() != align.abi => + if repr.scalar_align(dl).unwrap() != align => { BackendRepr::Memory { sized: true } } // Vectors require at least element alignment, else disable the opt - BackendRepr::SimdVector { element, count: _ } - if element.align(dl).abi > align.abi => - { + BackendRepr::SimdVector { element, count: _ } if element.align(dl).abi > align => { BackendRepr::Memory { sized: true } } // the alignment tests passed and we can use this @@ -474,8 +472,8 @@ impl LayoutCalculator { backend_repr, largest_niche: None, uninhabited: false, - align, - size: size.align_to(align.abi), + align: AbiAlign::new(align), + size: size.align_to(align), max_repr_align, unadjusted_abi_align, randomization_seed: combined_seed, @@ -611,7 +609,7 @@ impl LayoutCalculator { let mut align = dl.aggregate_align; let mut max_repr_align = repr.align; - let mut unadjusted_abi_align = align.abi; + let mut unadjusted_abi_align = align; let mut variant_layouts = variants .iter_enumerated() @@ -619,7 +617,7 @@ impl LayoutCalculator { let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?; st.variants = Variants::Single { index: j }; - align = align.max(st.align); + align = align.max(st.align.abi); max_repr_align = max_repr_align.max(st.max_repr_align); unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align); @@ -646,7 +644,7 @@ impl LayoutCalculator { let (niche_start, niche_scalar) = niche.reserve(dl, count)?; let niche_offset = niche.offset; let niche_size = niche.value.size(dl); - let size = variant_layouts[largest_variant_index].size.align_to(align.abi); + let size = variant_layouts[largest_variant_index].size.align_to(align); let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| { if i == largest_variant_index { @@ -699,7 +697,7 @@ impl LayoutCalculator { .iter_enumerated() .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO); let same_size = size == variant_layouts[largest_variant_index].size; - let same_align = align == variant_layouts[largest_variant_index].align; + let same_align = align == variant_layouts[largest_variant_index].align.abi; let uninhabited = variant_layouts.iter().all(|v| v.is_uninhabited()); let abi = if same_size && same_align && others_zst { @@ -746,7 +744,7 @@ impl LayoutCalculator { largest_niche, uninhabited, size, - align, + align: AbiAlign::new(align), max_repr_align, unadjusted_abi_align, randomization_seed: combined_seed, @@ -818,7 +816,7 @@ impl LayoutCalculator { let mut align = dl.aggregate_align; let mut max_repr_align = repr.align; - let mut unadjusted_abi_align = align.abi; + let mut unadjusted_abi_align = align; let mut size = Size::ZERO; @@ -860,7 +858,7 @@ impl LayoutCalculator { } } size = cmp::max(size, st.size); - align = align.max(st.align); + align = align.max(st.align.abi); max_repr_align = max_repr_align.max(st.max_repr_align); unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align); Ok(st) @@ -868,7 +866,7 @@ impl LayoutCalculator { 
.collect::, _>>()?; // Align the maximum variant size to the largest alignment. - size = size.align_to(align.abi); + size = size.align_to(align); // FIXME(oli-obk): deduplicate and harden these checks if size.bytes() >= dl.obj_size_bound() { @@ -1042,7 +1040,7 @@ impl LayoutCalculator { }; if pair_offsets[FieldIdx::new(0)] == Size::ZERO && pair_offsets[FieldIdx::new(1)] == *offset - && align == pair.align + && align == pair.align.abi && size == pair.size { // We can use `ScalarPair` only when it matches our @@ -1066,7 +1064,7 @@ impl LayoutCalculator { // Also need to bump up the size and alignment, so that the entire value fits // in here. variant.size = cmp::max(variant.size, size); - variant.align.abi = cmp::max(variant.align.abi, align.abi); + variant.align.abi = cmp::max(variant.align.abi, align); } } } @@ -1092,7 +1090,7 @@ impl LayoutCalculator { largest_niche, uninhabited, backend_repr: abi, - align, + align: AbiAlign::new(align), size, max_repr_align, unadjusted_abi_align, @@ -1288,7 +1286,7 @@ impl LayoutCalculator { if let StructKind::Prefixed(prefix_size, prefix_align) = kind { let prefix_align = if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align }; - align = align.max(AbiAlign::new(prefix_align)); + align = align.max(prefix_align); offset = prefix_size.align_to(prefix_align); } for &i in &inverse_memory_index { @@ -1312,7 +1310,7 @@ impl LayoutCalculator { field.align }; offset = offset.align_to(field_align.abi); - align = align.max(field_align); + align = align.max(field_align.abi); max_repr_align = max_repr_align.max(field.max_repr_align); debug!("univariant offset: {:?} field: {:#?}", offset, field); @@ -1339,9 +1337,9 @@ impl LayoutCalculator { // The unadjusted ABI alignment does not include repr(align), but does include repr(pack). // See documentation on `LayoutData::unadjusted_abi_align`. - let unadjusted_abi_align = align.abi; + let unadjusted_abi_align = align; if let Some(repr_align) = repr.align { - align = align.max(AbiAlign::new(repr_align)); + align = align.max(repr_align); } // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate. let align = align; @@ -1360,7 +1358,7 @@ impl LayoutCalculator { debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices())); inverse_memory_index.into_iter().map(|it| it.index() as u32).collect() }; - let size = min_size.align_to(align.abi); + let size = min_size.align_to(align); // FIXME(oli-obk): deduplicate and harden these checks if size.bytes() >= dl.obj_size_bound() { return Err(LayoutCalculatorError::SizeOverflow); @@ -1383,8 +1381,7 @@ impl LayoutCalculator { layout_of_single_non_zst_field = Some(field); // Field fills the struct and it has a scalar or scalar pair ABI. - if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size - { + if offsets[i].bytes() == 0 && align == field.align.abi && size == field.size { match field.backend_repr { // For plain scalars, or vectors of them, we can't unpack // newtypes for `#[repr(C)]`, as that affects C ABIs. @@ -1428,7 +1425,7 @@ impl LayoutCalculator { }; if offsets[i] == pair_offsets[FieldIdx::new(0)] && offsets[j] == pair_offsets[FieldIdx::new(1)] - && align == pair.align + && align == pair.align.abi && size == pair.size { // We can use `ScalarPair` only when it matches our @@ -1450,7 +1447,7 @@ impl LayoutCalculator { Some(l) => l.unadjusted_abi_align, None => { // `repr(transparent)` with all ZST fields. 
- align.abi + align } } } else { @@ -1465,7 +1462,7 @@ impl LayoutCalculator { backend_repr: abi, largest_niche, uninhabited, - align, + align: AbiAlign::new(align), size, max_repr_align, unadjusted_abi_align, diff --git a/compiler/rustc_abi/src/layout/simple.rs b/compiler/rustc_abi/src/layout/simple.rs index 0d0706defc2e5..b3807c8727396 100644 --- a/compiler/rustc_abi/src/layout/simple.rs +++ b/compiler/rustc_abi/src/layout/simple.rs @@ -4,7 +4,8 @@ use rustc_hashes::Hash64; use rustc_index::{Idx, IndexVec}; use crate::{ - BackendRepr, FieldsShape, HasDataLayout, LayoutData, Niche, Primitive, Scalar, Size, Variants, + AbiAlign, BackendRepr, FieldsShape, HasDataLayout, LayoutData, Niche, Primitive, Scalar, Size, + Variants, }; /// "Simple" layout constructors that cannot fail. @@ -20,10 +21,10 @@ impl LayoutData { backend_repr: BackendRepr::Memory { sized }, largest_niche: None, uninhabited: false, - align: dl.i8_align, + align: AbiAlign::new(dl.i8_align), size: Size::ZERO, max_repr_align: None, - unadjusted_abi_align: dl.i8_align.abi, + unadjusted_abi_align: dl.i8_align, randomization_seed: Hash64::new(0), } } @@ -37,10 +38,10 @@ impl LayoutData { backend_repr: BackendRepr::Memory { sized: true }, largest_niche: None, uninhabited: true, - align: dl.i8_align, + align: AbiAlign::new(dl.i8_align), size: Size::ZERO, max_repr_align: None, - unadjusted_abi_align: dl.i8_align.abi, + unadjusted_abi_align: dl.i8_align, randomization_seed: Hash64::ZERO, } } @@ -89,10 +90,10 @@ impl LayoutData { pub fn scalar_pair(cx: &C, a: Scalar, b: Scalar) -> Self { let dl = cx.data_layout(); - let b_align = b.align(dl); - let align = a.align(dl).max(b_align).max(dl.aggregate_align); - let b_offset = a.size(dl).align_to(b_align.abi); - let size = (b_offset + b.size(dl)).align_to(align.abi); + let b_align = b.align(dl).abi; + let align = a.align(dl).abi.max(b_align).max(dl.aggregate_align); + let b_offset = a.size(dl).align_to(b_align); + let size = (b_offset + b.size(dl)).align_to(align); // HACK(nox): We iter on `b` and then `a` because `max_by_key` // returns the last maximum. @@ -112,10 +113,10 @@ impl LayoutData { backend_repr: BackendRepr::ScalarPair(a, b), largest_niche, uninhabited: false, - align, + align: AbiAlign::new(align), size, max_repr_align: None, - unadjusted_abi_align: align.abi, + unadjusted_abi_align: align, randomization_seed: Hash64::new(combined_seed), } } @@ -138,10 +139,10 @@ impl LayoutData { backend_repr: BackendRepr::Memory { sized: true }, largest_niche: None, uninhabited: true, - align: dl.i8_align, + align: AbiAlign::new(dl.i8_align), size: Size::ZERO, max_repr_align: None, - unadjusted_abi_align: dl.i8_align.abi, + unadjusted_abi_align: dl.i8_align, randomization_seed: Hash64::ZERO, } } diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 369874521e57e..fe7148d1d5729 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -229,7 +229,7 @@ pub struct PointerSpec { /// The size of the bitwise representation of the pointer. pointer_size: Size, /// The alignment of pointers for this address space - pointer_align: AbiAlign, + pointer_align: Align, /// The size of the value a pointer can be offset by in this address space. 
pointer_offset: Size, /// Pointers into this address space contain extra metadata @@ -242,20 +242,20 @@ pub struct PointerSpec { #[derive(Debug, PartialEq, Eq)] pub struct TargetDataLayout { pub endian: Endian, - pub i1_align: AbiAlign, - pub i8_align: AbiAlign, - pub i16_align: AbiAlign, - pub i32_align: AbiAlign, - pub i64_align: AbiAlign, - pub i128_align: AbiAlign, - pub f16_align: AbiAlign, - pub f32_align: AbiAlign, - pub f64_align: AbiAlign, - pub f128_align: AbiAlign, - pub aggregate_align: AbiAlign, + pub i1_align: Align, + pub i8_align: Align, + pub i16_align: Align, + pub i32_align: Align, + pub i64_align: Align, + pub i128_align: Align, + pub f16_align: Align, + pub f32_align: Align, + pub f64_align: Align, + pub f128_align: Align, + pub aggregate_align: Align, /// Alignments for vector types. - pub vector_align: Vec<(Size, AbiAlign)>, + pub vector_align: Vec<(Size, Align)>, pub default_address_space: AddressSpace, pub default_address_space_pointer_spec: PointerSpec, @@ -282,25 +282,25 @@ impl Default for TargetDataLayout { let align = |bits| Align::from_bits(bits).unwrap(); TargetDataLayout { endian: Endian::Big, - i1_align: AbiAlign::new(align(8)), - i8_align: AbiAlign::new(align(8)), - i16_align: AbiAlign::new(align(16)), - i32_align: AbiAlign::new(align(32)), - i64_align: AbiAlign::new(align(32)), - i128_align: AbiAlign::new(align(32)), - f16_align: AbiAlign::new(align(16)), - f32_align: AbiAlign::new(align(32)), - f64_align: AbiAlign::new(align(64)), - f128_align: AbiAlign::new(align(128)), - aggregate_align: AbiAlign { abi: align(8) }, + i1_align: align(8), + i8_align: align(8), + i16_align: align(16), + i32_align: align(32), + i64_align: align(32), + i128_align: align(32), + f16_align: align(16), + f32_align: align(32), + f64_align: align(64), + f128_align: align(128), + aggregate_align: align(8), vector_align: vec![ - (Size::from_bits(64), AbiAlign::new(align(64))), - (Size::from_bits(128), AbiAlign::new(align(128))), + (Size::from_bits(64), align(64)), + (Size::from_bits(128), align(128)), ], default_address_space: AddressSpace::ZERO, default_address_space_pointer_spec: PointerSpec { pointer_size: Size::from_bits(64), - pointer_align: AbiAlign::new(align(64)), + pointer_align: align(64), pointer_offset: Size::from_bits(64), _is_fat: false, }, @@ -360,7 +360,7 @@ impl TargetDataLayout { .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err }) }; let abi = parse_bits(s, "alignment", cause)?; - Ok(AbiAlign::new(align_from_bits(abi)?)) + Ok(align_from_bits(abi)?) }; // Parse an alignment sequence, possibly in the form `[:]`, @@ -596,7 +596,7 @@ impl TargetDataLayout { /// psABI-mandated alignment for a vector type, if any #[inline] - fn cabi_vector_align(&self, vec_size: Size) -> Option { + fn cabi_vector_align(&self, vec_size: Size) -> Option { self.vector_align .iter() .find(|(size, _align)| *size == vec_size) @@ -605,10 +605,9 @@ impl TargetDataLayout { /// an alignment resembling the one LLVM would pick for a vector #[inline] - pub fn llvmlike_vector_align(&self, vec_size: Size) -> AbiAlign { - self.cabi_vector_align(vec_size).unwrap_or(AbiAlign::new( - Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap(), - )) + pub fn llvmlike_vector_align(&self, vec_size: Size) -> Align { + self.cabi_vector_align(vec_size) + .unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap()) } /// Get the pointer size in the default data address space. 
@@ -654,21 +653,19 @@ impl TargetDataLayout { /// Get the pointer alignment in the default data address space. #[inline] pub fn pointer_align(&self) -> AbiAlign { - self.default_address_space_pointer_spec.pointer_align + AbiAlign::new(self.default_address_space_pointer_spec.pointer_align) } /// Get the pointer alignment in a specific address space. #[inline] pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign { - if c == self.default_address_space { - return self.default_address_space_pointer_spec.pointer_align; - } - - if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) { + AbiAlign::new(if c == self.default_address_space { + self.default_address_space_pointer_spec.pointer_align + } else if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) { e.1.pointer_align } else { panic!("Use of unknown address space {c:?}"); - } + }) } } @@ -1185,13 +1182,13 @@ impl Integer { use Integer::*; let dl = cx.data_layout(); - match self { + AbiAlign::new(match self { I8 => dl.i8_align, I16 => dl.i16_align, I32 => dl.i32_align, I64 => dl.i64_align, I128 => dl.i128_align, - } + }) } /// Returns the largest signed value that can be represented by this Integer. @@ -1311,12 +1308,12 @@ impl Float { use Float::*; let dl = cx.data_layout(); - match self { + AbiAlign::new(match self { F16 => dl.f16_align, F32 => dl.f32_align, F64 => dl.f64_align, F128 => dl.f128_align, - } + }) } } diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index a110ecbb75d95..40375ef651092 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -564,7 +564,7 @@ impl<'ll> CodegenCx<'ll, '_> { let g = self.define_global(&sym, llty).unwrap_or_else(|| { bug!("symbol `{}` is already defined", sym); }); - set_global_alignment(self, g, self.tcx.data_layout.i8_align.abi); + set_global_alignment(self, g, self.tcx.data_layout.i8_align); llvm::set_initializer(g, llval); llvm::set_linkage(g, llvm::Linkage::PrivateLinkage); llvm::set_section(g, c"__TEXT,__cstring,cstring_literals"); @@ -680,7 +680,7 @@ impl<'ll> CodegenCx<'ll, '_> { let methname_g = self.define_global(&methname_sym, methname_llty).unwrap_or_else(|| { bug!("symbol `{}` is already defined", methname_sym); }); - set_global_alignment(self, methname_g, self.tcx.data_layout.i8_align.abi); + set_global_alignment(self, methname_g, self.tcx.data_layout.i8_align); llvm::set_initializer(methname_g, methname_llval); llvm::set_linkage(methname_g, llvm::Linkage::PrivateLinkage); llvm::set_section( diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 50398a32142eb..94a74e27bbcb4 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -1047,7 +1047,7 @@ fn codegen_emcc_try<'ll, 'tcx>( // create an alloca and pass a pointer to that. let ptr_size = bx.tcx().data_layout.pointer_size(); let ptr_align = bx.tcx().data_layout.pointer_align().abi; - let i8_align = bx.tcx().data_layout.i8_align.abi; + let i8_align = bx.tcx().data_layout.i8_align; // Required in order for there to be no padding between the fields. 
assert!(i8_align <= ptr_align); let catch_data = bx.alloca(2 * ptr_size, ptr_align); diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index ab08125217ff5..4d6fc3e19c5c6 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -291,7 +291,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>( bx.inbounds_ptradd(va_list_addr, bx.const_usize(1)) // fpr }; - let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align.abi); + let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align); // "Align" the register count when the type is passed as `i64`. if is_i64 || (is_f64 && is_soft_float_abi) { @@ -329,7 +329,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>( // Increase the used-register count. let reg_incr = if is_i64 || (is_f64 && is_soft_float_abi) { 2 } else { 1 }; let new_num_regs = bx.add(num_regs, bx.cx.const_u8(reg_incr)); - bx.store(new_num_regs, num_regs_addr, dl.i8_align.abi); + bx.store(new_num_regs, num_regs_addr, dl.i8_align); bx.br(end); @@ -339,7 +339,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>( let mem_addr = { bx.switch_to_block(in_mem); - bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align.abi); + bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align); // Everything in the overflow area is rounded up to a size of at least 4. let overflow_area_align = Align::from_bytes(4).unwrap(); @@ -813,7 +813,7 @@ fn emit_xtensa_va_arg<'ll, 'tcx>( let va_ndx_offset = va_reg_offset + 4; let offset_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(va_ndx_offset)); - let offset = bx.load(bx.type_i32(), offset_ptr, bx.tcx().data_layout.i32_align.abi); + let offset = bx.load(bx.type_i32(), offset_ptr, bx.tcx().data_layout.i32_align); let offset = round_up_to_alignment(bx, offset, layout.align.abi); let slot_size = layout.size.align_to(Align::from_bytes(4).unwrap()).bytes() as i32; diff --git a/compiler/rustc_session/src/config/cfg.rs b/compiler/rustc_session/src/config/cfg.rs index f3d91ce4a5dd8..a72f6201dcea2 100644 --- a/compiler/rustc_session/src/config/cfg.rs +++ b/compiler/rustc_session/src/config/cfg.rs @@ -259,11 +259,11 @@ pub(crate) fn default_configuration(sess: &Session) -> Cfg { }); let mut has_atomic = false; for (i, align) in [ - (8, layout.i8_align.abi), - (16, layout.i16_align.abi), - (32, layout.i32_align.abi), - (64, layout.i64_align.abi), - (128, layout.i128_align.abi), + (8, layout.i8_align), + (16, layout.i16_align), + (32, layout.i32_align), + (64, layout.i64_align), + (128, layout.i128_align), ] { if i >= sess.target.min_atomic_width() && i <= sess.target.max_atomic_width() { if !has_atomic { diff --git a/compiler/rustc_target/src/callconv/mips.rs b/compiler/rustc_target/src/callconv/mips.rs index 48a01da865b7c..8ffd7bd177849 100644 --- a/compiler/rustc_target/src/callconv/mips.rs +++ b/compiler/rustc_target/src/callconv/mips.rs @@ -24,7 +24,7 @@ where } let dl = cx.data_layout(); let size = arg.layout.size; - let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi; + let align = arg.layout.align.abi.max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { let pad_i32 = !offset.is_aligned(align); diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs index 0209838bec181..8386a15933c98 100644 --- a/compiler/rustc_target/src/callconv/mips64.rs +++ b/compiler/rustc_target/src/callconv/mips64.rs @@ -110,9 +110,9 @@ where // We only care about aligned doubles if let BackendRepr::Scalar(scalar) = 
field.backend_repr { if scalar.primitive() == Primitive::Float(Float::F64) { - if offset.is_aligned(dl.f64_align.abi) { + if offset.is_aligned(dl.f64_align) { // Insert enough integers to cover [last_offset, offset) - assert!(last_offset.is_aligned(dl.f64_align.abi)); + assert!(last_offset.is_aligned(dl.f64_align)); for _ in 0..((offset - last_offset).bits() / 64) .min((prefix.len() - prefix_index) as u64) { diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs index 7a7c63c475b04..c59af581a1fe4 100644 --- a/compiler/rustc_target/src/callconv/mod.rs +++ b/compiler/rustc_target/src/callconv/mod.rs @@ -332,7 +332,7 @@ impl CastTarget { self.prefix .iter() .filter_map(|x| x.map(|reg| reg.align(cx))) - .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| { + .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)), |acc, align| { acc.max(align) }) } diff --git a/compiler/rustc_target/src/callconv/sparc.rs b/compiler/rustc_target/src/callconv/sparc.rs index 48a01da865b7c..8ffd7bd177849 100644 --- a/compiler/rustc_target/src/callconv/sparc.rs +++ b/compiler/rustc_target/src/callconv/sparc.rs @@ -24,7 +24,7 @@ where } let dl = cx.data_layout(); let size = arg.layout.size; - let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi; + let align = arg.layout.align.abi.max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { let pad_i32 = !offset.is_aligned(align); diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs index ecc9067ced35e..62c8ed1dc21b1 100644 --- a/compiler/rustc_target/src/callconv/sparc64.rs +++ b/compiler/rustc_target/src/callconv/sparc64.rs @@ -29,7 +29,7 @@ where data.has_float = true; - if !data.last_offset.is_aligned(dl.f64_align.abi) && data.last_offset < offset { + if !data.last_offset.is_aligned(dl.f64_align) && data.last_offset < offset { if data.prefix_index == data.prefix.len() { return data; } From 0c9d0dfe046f0674f0507df564504ac3bac862d9 Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Sun, 28 Sep 2025 14:40:39 -0700 Subject: [PATCH 3/3] remove explicit deref of AbiAlign for most methods Much of the compiler calls functions on Align projected from AbiAlign. AbiAlign impls Deref to its inner Align, so we can simplify these away. Also, it will minimize disruption when AbiAlign is removed. For now, preserve usages that might resolve to PartialOrd or PartialEq, as those have odd inference. 
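As context, a minimal sketch of the Deref behaviour this relies on. The types below are simplified stand-ins, not the real rustc_abi definitions: method calls auto-deref through the wrapper, while comparison operators do not, which is why call sites that go through PartialOrd or PartialEq keep the explicit .abi for now.

    use std::ops::Deref;

    // Simplified stand-ins, not the real rustc_abi definitions.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
    struct Align {
        pow2: u8,
    }

    impl Align {
        fn bytes(self) -> u64 {
            1u64 << self.pow2
        }
    }

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct AbiAlign {
        abi: Align,
    }

    // Deref is what lets layout.align.bytes() keep working once the
    // explicit .abi projection is dropped.
    impl Deref for AbiAlign {
        type Target = Align;
        fn deref(&self) -> &Align {
            &self.abi
        }
    }

    fn main() {
        let align = AbiAlign { abi: Align { pow2: 3 } };

        // Method calls auto-deref through the wrapper, so .abi is redundant.
        assert_eq!(align.bytes(), 8);
        assert_eq!(align.abi.bytes(), 8);

        // Comparison operators do not auto-deref: in this sketch,
        // align == other or align > other would not compile, so such call
        // sites keep the explicit projection.
        let other = Align { pow2: 3 };
        assert!(align.abi == other);
        assert!(align.abi >= other);
    }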
--- compiler/rustc_abi/src/layout.rs | 6 +++--- compiler/rustc_abi/src/lib.rs | 2 +- compiler/rustc_codegen_cranelift/src/abi/comments.rs | 2 +- compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs | 2 +- compiler/rustc_codegen_cranelift/src/base.rs | 2 +- compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs | 2 +- .../rustc_codegen_cranelift/src/debuginfo/types.rs | 6 ++---- compiler/rustc_codegen_cranelift/src/unsize.rs | 6 +++--- .../rustc_codegen_cranelift/src/value_and_place.rs | 6 +++--- compiler/rustc_codegen_gcc/src/context.rs | 6 +++--- compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 2 +- .../src/debuginfo/metadata/enums/native.rs | 4 ++-- compiler/rustc_codegen_llvm/src/intrinsic.rs | 2 +- compiler/rustc_codegen_llvm/src/va_arg.rs | 4 ++-- compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 2 +- compiler/rustc_codegen_ssa/src/size_of_val.rs | 8 ++++---- compiler/rustc_const_eval/src/interpret/operator.rs | 2 +- compiler/rustc_const_eval/src/util/alignment.rs | 2 +- .../src/util/check_validity_requirement.rs | 2 +- .../rustc_hir_analysis/src/hir_ty_lowering/cmse.rs | 2 +- compiler/rustc_middle/src/ty/vtable.rs | 2 +- .../rustc_mir_transform/src/dataflow_const_prop.rs | 2 +- compiler/rustc_mir_transform/src/gvn.rs | 2 +- compiler/rustc_mir_transform/src/known_panics_lint.rs | 2 +- compiler/rustc_target/src/callconv/arm.rs | 2 +- compiler/rustc_target/src/callconv/loongarch.rs | 2 +- compiler/rustc_target/src/callconv/nvptx64.rs | 4 ++-- compiler/rustc_target/src/callconv/powerpc64.rs | 2 +- compiler/rustc_target/src/callconv/riscv.rs | 2 +- compiler/rustc_target/src/callconv/xtensa.rs | 2 +- compiler/rustc_transmute/src/layout/tree.rs | 2 +- compiler/rustc_ty_utils/src/layout.rs | 10 +++++----- compiler/rustc_ty_utils/src/layout/invariant.rs | 6 +++--- .../clippy_lints/src/casts/cast_ptr_alignment.rs | 4 ++-- .../clippy_lints/src/casts/manual_dangling_ptr.rs | 2 +- src/tools/miri/src/machine.rs | 2 +- .../rust-analyzer/crates/hir-ty/src/layout/tests.rs | 4 ++-- src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs | 6 +++--- .../rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs | 4 ++-- src/tools/rust-analyzer/crates/hir/src/lib.rs | 2 +- 40 files changed, 66 insertions(+), 68 deletions(-) diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index 5004d0c80220f..d56fbb5cb94f2 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -1169,7 +1169,7 @@ impl LayoutCalculator { // To allow unsizing `&Foo` -> `&Foo`, the layout of the struct must // not depend on the layout of the tail. let max_field_align = - fields_excluding_tail.iter().map(|f| f.align.abi.bytes()).max().unwrap_or(1); + fields_excluding_tail.iter().map(|f| f.align.bytes()).max().unwrap_or(1); let largest_niche_size = fields_excluding_tail .iter() .filter_map(|f| f.largest_niche) @@ -1189,7 +1189,7 @@ impl LayoutCalculator { } else { // Returns `log2(effective-align)`. The calculation assumes that size is an // integer multiple of align, except for ZSTs. - let align = layout.align.abi.bytes(); + let align = layout.align.bytes(); let size = layout.size.bytes(); let niche_size = layout.largest_niche.map(|n| n.available(dl)).unwrap_or(0); // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields. 
@@ -1488,7 +1488,7 @@ impl LayoutCalculator { for i in layout.fields.index_by_increasing_offset() { let offset = layout.fields.offset(i); let f = &fields[FieldIdx::new(i)]; - write!(s, "[o{}a{}s{}", offset.bytes(), f.align.abi.bytes(), f.size.bytes()).unwrap(); + write!(s, "[o{}a{}s{}", offset.bytes(), f.align.bytes(), f.size.bytes()).unwrap(); if let Some(n) = f.largest_niche { write!( s, diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 369874521e57e..9a364f9be184a 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -2159,7 +2159,7 @@ impl LayoutData { /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1). pub fn is_1zst(&self) -> bool { - self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1 + self.is_sized() && self.size.bytes() == 0 && self.align.bytes() == 1 } /// Returns `true` if the type is a ZST and not unsized. diff --git a/compiler/rustc_codegen_cranelift/src/abi/comments.rs b/compiler/rustc_codegen_cranelift/src/abi/comments.rs index c74efeb59f3fc..d1b2b9a502ac2 100644 --- a/compiler/rustc_codegen_cranelift/src/abi/comments.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/comments.rs @@ -89,7 +89,7 @@ pub(super) fn add_local_place_comments<'tcx>( format!("{:?}", local), format!("{:?}", ty), size.bytes(), - align.abi.bytes(), + align.bytes(), if extra.is_empty() { "" } else { " " }, extra, )); diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs index 2031842062d97..7a909a740b054 100644 --- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs @@ -233,7 +233,7 @@ pub(super) fn from_casted_value<'tcx>( // It may also be smaller for example when the type is a wrapper around an integer with a // larger alignment than the integer. 
std::cmp::max(abi_param_size, layout_size), - u32::try_from(layout.align.abi.bytes()).unwrap(), + u32::try_from(layout.align.bytes()).unwrap(), ); let mut block_params_iter = block_params.iter().copied(); for (offset, _) in abi_params { diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs index 41e11e1de6163..2cc5b82ddd345 100644 --- a/compiler/rustc_codegen_cranelift/src/base.rs +++ b/compiler/rustc_codegen_cranelift/src/base.rs @@ -846,7 +846,7 @@ fn codegen_stmt<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, cur_block: Block, stmt: let layout = fx.layout_of(fx.monomorphize(ty)); let val = match null_op { NullOp::SizeOf => layout.size.bytes(), - NullOp::AlignOf => layout.align.abi.bytes(), + NullOp::AlignOf => layout.align.bytes(), NullOp::OffsetOf(fields) => fx .tcx .offset_of_subfield( diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs index 286e02b986b3c..4c438742f3d22 100644 --- a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs @@ -304,7 +304,7 @@ impl DebugContext { entry.set(gimli::DW_AT_decl_file, AttributeValue::FileIndex(Some(file_id))); entry.set(gimli::DW_AT_decl_line, AttributeValue::Udata(line)); - entry.set(gimli::DW_AT_alignment, AttributeValue::Udata(static_layout.align.abi.bytes())); + entry.set(gimli::DW_AT_alignment, AttributeValue::Udata(static_layout.align.bytes())); let mut expr = Expression::new(); expr.op_addr(address_for_data(data_id)); diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/types.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/types.rs index 25b922c8be4c7..0d49f32373caa 100644 --- a/compiler/rustc_codegen_cranelift/src/debuginfo/types.rs +++ b/compiler/rustc_codegen_cranelift/src/debuginfo/types.rs @@ -166,7 +166,7 @@ impl DebugContext { let tuple_entry = self.dwarf.unit.get_mut(tuple_type_id); tuple_entry.set(gimli::DW_AT_name, AttributeValue::StringRef(self.dwarf.strings.add(name))); tuple_entry.set(gimli::DW_AT_byte_size, AttributeValue::Udata(layout.size.bytes())); - tuple_entry.set(gimli::DW_AT_alignment, AttributeValue::Udata(layout.align.abi.bytes())); + tuple_entry.set(gimli::DW_AT_alignment, AttributeValue::Udata(layout.align.bytes())); for (i, (ty, dw_ty)) in components.into_iter().enumerate() { let member_id = self.dwarf.unit.add(tuple_type_id, gimli::DW_TAG_member); @@ -178,9 +178,7 @@ impl DebugContext { member_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty)); member_entry.set( gimli::DW_AT_alignment, - AttributeValue::Udata( - FullyMonomorphizedLayoutCx(tcx).layout_of(ty).align.abi.bytes(), - ), + AttributeValue::Udata(FullyMonomorphizedLayoutCx(tcx).layout_of(ty).align.bytes()), ); member_entry.set( gimli::DW_AT_data_member_location, diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs index 643c7feb89a26..d994f3e32ec3c 100644 --- a/compiler/rustc_codegen_cranelift/src/unsize.rs +++ b/compiler/rustc_codegen_cranelift/src/unsize.rs @@ -167,7 +167,7 @@ pub(crate) fn size_and_align_of<'tcx>( if layout.is_sized() { return ( fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64), - fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64), + fx.bcx.ins().iconst(fx.pointer_type, layout.align.bytes() as i64), ); } @@ -186,7 +186,7 @@ pub(crate) fn size_and_align_of<'tcx>( // times the unit size. 
( fx.bcx.ins().imul_imm(info.unwrap(), unit.size.bytes() as i64), - fx.bcx.ins().iconst(fx.pointer_type, unit.align.abi.bytes() as i64), + fx.bcx.ins().iconst(fx.pointer_type, unit.align.bytes() as i64), ) } ty::Foreign(_) => { @@ -224,7 +224,7 @@ pub(crate) fn size_and_align_of<'tcx>( let unsized_offset_unadjusted = layout.fields.offset(i).bytes(); let unsized_offset_unadjusted = fx.bcx.ins().iconst(fx.pointer_type, unsized_offset_unadjusted as i64); - let sized_align = layout.align.abi.bytes(); + let sized_align = layout.align.bytes(); let sized_align = fx.bcx.ins().iconst(fx.pointer_type, sized_align as i64); // Recurse to get the size of the dynamically sized field (must be diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs index 4519fa1a270e4..04e10cf17088c 100644 --- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs +++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs @@ -383,7 +383,7 @@ impl<'tcx> CPlace<'tcx> { let stack_slot = fx.create_stack_slot( u32::try_from(layout.size.bytes()).unwrap(), - u32::try_from(layout.align.abi.bytes()).unwrap(), + u32::try_from(layout.align.bytes()).unwrap(), ); CPlace { inner: CPlaceInner::Addr(stack_slot, None), layout } } @@ -641,8 +641,8 @@ impl<'tcx> CPlace<'tcx> { let size = dst_layout.size.bytes(); // `emit_small_memory_copy` uses `u8` for alignments, just use the maximum // alignment that fits in a `u8` if the actual alignment is larger. - let src_align = src_layout.align.abi.bytes().try_into().unwrap_or(128); - let dst_align = dst_layout.align.abi.bytes().try_into().unwrap_or(128); + let src_align = src_layout.align.bytes().try_into().unwrap_or(128); + let dst_align = dst_layout.align.bytes().try_into().unwrap_or(128); fx.bcx.emit_small_memory_copy( fx.target_config, to_addr, diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs index 9815fb07eaae9..c9ae96777de44 100644 --- a/compiler/rustc_codegen_gcc/src/context.rs +++ b/compiler/rustc_codegen_gcc/src/context.rs @@ -147,7 +147,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let layout = tcx .layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(rust_type)) .unwrap(); - let align = layout.align.abi.bytes(); + let align = layout.align.bytes(); // For types with size 1, the alignment can be 1 and only 1 // So, we can skip the call to ``get_aligned`. // In the future, we can add a GCC API to query the type align, @@ -186,9 +186,9 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { (i128_type, u128_type) } else { /*let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.i128)).unwrap(); - let i128_align = layout.align.abi.bytes(); + let i128_align = layout.align.bytes(); let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.u128)).unwrap(); - let u128_align = layout.align.abi.bytes();*/ + let u128_align = layout.align.bytes();*/ // TODO(antoyo): re-enable the alignment when libgccjit fixed the issue in // gcc_jit_context_new_array_constructor (it should not use reinterpret_cast). 
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 4b74c04ed7ae0..1e4ace4ca922c 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -1043,7 +1043,7 @@ fn create_member_type<'ll, 'tcx>( file_metadata, line_number, layout.size.bits(), - layout.align.abi.bits() as u32, + layout.align.bits() as u32, offset.bits(), flags, type_di_node, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs index 62d38d463aba7..1ae6e6e5eecab 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs @@ -289,7 +289,7 @@ fn build_enum_variant_part_di_node<'ll, 'tcx>( file_metadata, line_number, enum_type_and_layout.size.bits(), - enum_type_and_layout.align.abi.bits() as u32, + enum_type_and_layout.align.bits() as u32, DIFlags::FlagZero, tag_member_di_node, create_DIArray(DIB(cx), &[]), @@ -449,7 +449,7 @@ fn build_enum_variant_member_di_node<'ll, 'tcx>( file_di_node, line_number, enum_type_and_layout.size.bits(), - enum_type_and_layout.align.abi.bits() as u32, + enum_type_and_layout.align.bits() as u32, Size::ZERO.bits(), discr, DIFlags::FlagZero, diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 013108d1286e1..ba80352916b08 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -297,7 +297,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { let align = if name == sym::unaligned_volatile_load { 1 } else { - result.layout.align.abi.bytes() as u32 + result.layout.align.bytes() as u32 }; unsafe { llvm::LLVMSetAlignment(load, align); diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index d48c7cf874a0a..b2c60db9c679a 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -193,7 +193,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( // the offset again. bx.switch_to_block(maybe_reg); - if gr_type && layout.align.abi.bytes() > 8 { + if gr_type && layout.align.bytes() > 8 { reg_off_v = bx.add(reg_off_v, bx.const_i32(15)); reg_off_v = bx.and(reg_off_v, bx.const_i32(-16)); } @@ -761,7 +761,7 @@ fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>( // byte boundary if alignment needed by type exceeds 8 byte boundary. // It isn't stated explicitly in the standard, but in practice we use // alignment greater than 16 where necessary. 
- if layout.layout.align.abi.bytes() > 8 { + if layout.layout.align.bytes() > 8 { unreachable!("all instances of VaArgSafe have an alignment <= 8"); } diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 2602bf82095c5..0a4b0f8d49498 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -617,7 +617,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } mir::NullOp::AlignOf => { assert!(bx.cx().type_is_sized(ty)); - let val = layout.align.abi.bytes(); + let val = layout.align.bytes(); bx.cx().const_usize(val) } mir::NullOp::OffsetOf(fields) => { diff --git a/compiler/rustc_codegen_ssa/src/size_of_val.rs b/compiler/rustc_codegen_ssa/src/size_of_val.rs index 577012151e49f..e1bd8014d7a2f 100644 --- a/compiler/rustc_codegen_ssa/src/size_of_val.rs +++ b/compiler/rustc_codegen_ssa/src/size_of_val.rs @@ -21,7 +21,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( trace!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}", t, info, layout); if layout.is_sized() { let size = bx.const_usize(layout.size.bytes()); - let align = bx.const_usize(layout.align.abi.bytes()); + let align = bx.const_usize(layout.align.bytes()); return (size, align); } match t.kind() { @@ -49,7 +49,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // All slice sizes must fit into `isize`, so this multiplication cannot // wrap -- neither signed nor unsigned. bx.unchecked_sumul(info.unwrap(), bx.const_usize(unit.size.bytes())), - bx.const_usize(unit.align.abi.bytes()), + bx.const_usize(unit.align.bytes()), ) } ty::Foreign(_) => { @@ -82,7 +82,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // This function does not return so we can now return whatever we want. let size = bx.const_usize(layout.size.bytes()); - let align = bx.const_usize(layout.align.abi.bytes()); + let align = bx.const_usize(layout.align.bytes()); (size, align) } ty::Adt(..) | ty::Tuple(..) 
=> { @@ -94,7 +94,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let i = layout.fields.count() - 1; let unsized_offset_unadjusted = layout.fields.offset(i).bytes(); - let sized_align = layout.align.abi.bytes(); + let sized_align = layout.align.bytes(); debug!( "DST {} offset of dyn field: {}, statically sized align: {}", t, unsized_offset_unadjusted, sized_align diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs index 74f8a0a7b093c..f0819423aa0fa 100644 --- a/compiler/rustc_const_eval/src/interpret/operator.rs +++ b/compiler/rustc_const_eval/src/interpret/operator.rs @@ -528,7 +528,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { if !layout.is_sized() { span_bug!(self.cur_span(), "unsized type for `NullaryOp::AlignOf`"); } - let val = layout.align.abi.bytes(); + let val = layout.align.bytes(); ImmTy::from_uint(val, usize_layout()) } OffsetOf(fields) => { diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs index 9507b24f603eb..9aafc7efd8a6a 100644 --- a/compiler/rustc_const_eval/src/util/alignment.rs +++ b/compiler/rustc_const_eval/src/util/alignment.rs @@ -37,7 +37,7 @@ where debug!( "is_disaligned({:?}) - align = {}, packed = {}; not disaligned", place, - layout.align.abi.bytes(), + layout.align.bytes(), pack.bytes() ); false diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs index b1f2959875051..1dea7e4252d7f 100644 --- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs +++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs @@ -129,7 +129,7 @@ fn check_validity_requirement_lax<'tcx>( if let Some(pointee) = this.ty.builtin_deref(false) { let pointee = cx.layout_of(pointee)?; // We need to ensure that the LLVM attributes `aligned` and `dereferenceable(size)` are satisfied. - if pointee.align.abi.bytes() > 1 { + if pointee.align.bytes() > 1 { // 0x01-filling is not aligned. 
return Ok(false); } diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/cmse.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/cmse.rs index 81deb35920afb..0458fa1204e81 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/cmse.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/cmse.rs @@ -138,7 +138,7 @@ fn is_valid_cmse_inputs<'tcx>( for (index, ty) in fn_sig.inputs().iter().enumerate() { let layout = tcx.layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(*ty))?; - let align = layout.layout.align().abi.bytes(); + let align = layout.layout.align().bytes(); let size = layout.layout.size().bytes(); accum += size; diff --git a/compiler/rustc_middle/src/ty/vtable.rs b/compiler/rustc_middle/src/ty/vtable.rs index e2f09fdcb4b43..a3e9054fdcb8b 100644 --- a/compiler/rustc_middle/src/ty/vtable.rs +++ b/compiler/rustc_middle/src/ty/vtable.rs @@ -104,7 +104,7 @@ pub(super) fn vtable_allocation_provider<'tcx>( .expect("failed to build vtable representation"); assert!(layout.is_sized(), "can't create a vtable for an unsized type"); let size = layout.size.bytes(); - let align = layout.align.abi.bytes(); + let align = layout.align.bytes(); let ptr_size = tcx.data_layout.pointer_size(); let ptr_align = tcx.data_layout.pointer_align().abi; diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs index 5c984984d3cc3..491e910ff6f84 100644 --- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs +++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs @@ -476,7 +476,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { }; let val = match null_op { NullOp::SizeOf if layout.is_sized() => layout.size.bytes(), - NullOp::AlignOf if layout.is_sized() => layout.align.abi.bytes(), + NullOp::AlignOf if layout.is_sized() => layout.align.bytes(), NullOp::OffsetOf(fields) => self .ecx .tcx diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index ebec3d125003d..29f6879aacd0f 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -618,7 +618,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } let val = match null_op { NullOp::SizeOf => arg_layout.size.bytes(), - NullOp::AlignOf => arg_layout.align.abi.bytes(), + NullOp::AlignOf => arg_layout.align.bytes(), NullOp::OffsetOf(fields) => self .ecx .tcx diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs index aaacc5866a2ae..5fffba55f17b6 100644 --- a/compiler/rustc_mir_transform/src/known_panics_lint.rs +++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs @@ -609,7 +609,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { let op_layout = self.ecx.layout_of(ty).ok()?; let val = match null_op { NullOp::SizeOf => op_layout.size.bytes(), - NullOp::AlignOf => op_layout.align.abi.bytes(), + NullOp::AlignOf => op_layout.align.bytes(), NullOp::OffsetOf(fields) => self .tcx .offset_of_subfield(self.typing_env, op_layout, fields.iter()) diff --git a/compiler/rustc_target/src/callconv/arm.rs b/compiler/rustc_target/src/callconv/arm.rs index 70830fa07b6ea..abc9a404e2ea4 100644 --- a/compiler/rustc_target/src/callconv/arm.rs +++ b/compiler/rustc_target/src/callconv/arm.rs @@ -77,7 +77,7 @@ where } } - let align = arg.layout.align.abi.bytes(); + let align = arg.layout.align.bytes(); let total = arg.layout.size; arg.cast_to(Uniform::consecutive(if align <= 4 { Reg::i32() } else { Reg::i64() }, total)); } diff --git 
a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs index 9213d73e24eac..bc3c9601fa3d3 100644 --- a/compiler/rustc_target/src/callconv/loongarch.rs +++ b/compiler/rustc_target/src/callconv/loongarch.rs @@ -322,7 +322,7 @@ fn classify_arg<'a, Ty, C>( } let total = arg.layout.size; - let align = arg.layout.align.abi.bits(); + let align = arg.layout.align.bits(); // "Scalars wider than 2✕XLEN are passed by reference and are replaced in // the argument list with the address." diff --git a/compiler/rustc_target/src/callconv/nvptx64.rs b/compiler/rustc_target/src/callconv/nvptx64.rs index 44977de7fcbc2..dc32dd87a7e76 100644 --- a/compiler/rustc_target/src/callconv/nvptx64.rs +++ b/compiler/rustc_target/src/callconv/nvptx64.rs @@ -21,7 +21,7 @@ fn classify_arg(arg: &mut ArgAbi<'_, Ty>) { /// the pass mode used for aggregates in arg and ret position fn classify_aggregate(arg: &mut ArgAbi<'_, Ty>) { - let align_bytes = arg.layout.align.abi.bytes(); + let align_bytes = arg.layout.align.bytes(); let size = arg.layout.size; let reg = match align_bytes { @@ -60,7 +60,7 @@ where // "`extern \"ptx-kernel\"` doesn't allow passing types other than primitives and structs" // ); - let align_bytes = arg.layout.align.abi.bytes(); + let align_bytes = arg.layout.align.bytes(); let unit = match align_bytes { 1 => Reg::i8(), diff --git a/compiler/rustc_target/src/callconv/powerpc64.rs b/compiler/rustc_target/src/callconv/powerpc64.rs index 89ec85e4b6664..be1d13816eff7 100644 --- a/compiler/rustc_target/src/callconv/powerpc64.rs +++ b/compiler/rustc_target/src/callconv/powerpc64.rs @@ -89,7 +89,7 @@ where // Aggregates larger than i64 should be padded at the tail to fill out a whole number // of i64s or i128s, depending on the aggregate alignment. Always use an array for // this, even if there is only a single element. - let reg = if arg.layout.align.abi.bytes() > 8 { Reg::i128() } else { Reg::i64() }; + let reg = if arg.layout.align.bytes() > 8 { Reg::i128() } else { Reg::i64() }; arg.cast_to(Uniform::consecutive( reg, size.align_to(Align::from_bytes(reg.size.bytes()).unwrap()), diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs index 161e2c1645f9a..16de3fe070dd4 100644 --- a/compiler/rustc_target/src/callconv/riscv.rs +++ b/compiler/rustc_target/src/callconv/riscv.rs @@ -328,7 +328,7 @@ fn classify_arg<'a, Ty, C>( } let total = arg.layout.size; - let align = arg.layout.align.abi.bits(); + let align = arg.layout.align.bits(); // "Scalars wider than 2✕XLEN are passed by reference and are replaced in // the argument list with the address." 
diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs index a73a70a1a0c07..561ee98787dee 100644 --- a/compiler/rustc_target/src/callconv/xtensa.rs +++ b/compiler/rustc_target/src/callconv/xtensa.rs @@ -48,7 +48,7 @@ where } let size = arg.layout.size.bits(); - let needed_align = arg.layout.align.abi.bits(); + let needed_align = arg.layout.align.bits(); let mut must_use_stack = false; // Determine the number of GPRs needed to pass the current argument diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs index a02e8ecf613f4..7f626e8c4e886 100644 --- a/compiler/rustc_transmute/src/layout/tree.rs +++ b/compiler/rustc_transmute/src/layout/tree.rs @@ -361,7 +361,7 @@ pub(crate) mod rustc { ty::Ref(region, ty, mutability) => { let layout = layout_of(cx, *ty)?; - let referent_align = layout.align.abi.bytes_usize(); + let referent_align = layout.align.bytes_usize(); let referent_size = layout.size.bytes_usize(); Ok(Tree::Ref(Reference { diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs index f59bc2117d537..317d101dafe05 100644 --- a/compiler/rustc_ty_utils/src/layout.rs +++ b/compiler/rustc_ty_utils/src/layout.rs @@ -795,7 +795,7 @@ fn variant_info_for_adt<'tcx>( name, offset: offset.bytes(), size: field_layout.size.bytes(), - align: field_layout.align.abi.bytes(), + align: field_layout.align.bytes(), type_name: None, } }) @@ -804,7 +804,7 @@ fn variant_info_for_adt<'tcx>( VariantInfo { name: n, kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact }, - align: layout.align.abi.bytes(), + align: layout.align.bytes(), size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() }, fields: field_info, } @@ -877,7 +877,7 @@ fn variant_info_for_coroutine<'tcx>( name: *name, offset: offset.bytes(), size: field_layout.size.bytes(), - align: field_layout.align.abi.bytes(), + align: field_layout.align.bytes(), type_name: None, } }) @@ -905,7 +905,7 @@ fn variant_info_for_coroutine<'tcx>( }), offset: offset.bytes(), size: field_layout.size.bytes(), - align: field_layout.align.abi.bytes(), + align: field_layout.align.bytes(), // Include the type name if there is no field name, or if the name is the // __awaitee placeholder symbol which means a child future being `.await`ed. 
type_name: (field_name.is_none() || field_name == Some(sym::__awaitee)) @@ -946,7 +946,7 @@ fn variant_info_for_coroutine<'tcx>( name: Some(Symbol::intern(&ty::CoroutineArgs::variant_name(variant_idx))), kind: SizeKind::Exact, size: variant_size.bytes(), - align: variant_layout.align.abi.bytes(), + align: variant_layout.align.bytes(), fields, } }) diff --git a/compiler/rustc_ty_utils/src/layout/invariant.rs b/compiler/rustc_ty_utils/src/layout/invariant.rs index 1311ee31182c6..b768269215fa2 100644 --- a/compiler/rustc_ty_utils/src/layout/invariant.rs +++ b/compiler/rustc_ty_utils/src/layout/invariant.rs @@ -8,7 +8,7 @@ use rustc_middle::ty::layout::{HasTyCtxt, LayoutCx, TyAndLayout}; pub(super) fn layout_sanity_check<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) { let tcx = cx.tcx(); - if !layout.size.bytes().is_multiple_of(layout.align.abi.bytes()) { + if !layout.size.bytes().is_multiple_of(layout.align.bytes()) { bug!("size is not a multiple of align, in the following layout:\n{layout:#?}"); } if layout.size.bytes() >= tcx.data_layout.obj_size_bound() { @@ -300,8 +300,8 @@ pub(super) fn layout_sanity_check<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayou if variant.align.abi > layout.align.abi { bug!( "Type with alignment {} bytes has variant with alignment {} bytes: {layout:#?}", - layout.align.abi.bytes(), - variant.align.abi.bytes(), + layout.align.bytes(), + variant.align.bytes(), ) } // Skip empty variants. diff --git a/src/tools/clippy/clippy_lints/src/casts/cast_ptr_alignment.rs b/src/tools/clippy/clippy_lints/src/casts/cast_ptr_alignment.rs index d78da9396faf3..7d14ba7fcf132 100644 --- a/src/tools/clippy/clippy_lints/src/casts/cast_ptr_alignment.rs +++ b/src/tools/clippy/clippy_lints/src/casts/cast_ptr_alignment.rs @@ -43,8 +43,8 @@ fn lint_cast_ptr_alignment<'tcx>(cx: &LateContext<'tcx>, expr: &Expr<'_>, cast_f expr.span, format!( "casting from `{cast_from}` to a more-strictly-aligned pointer (`{cast_to}`) ({} < {} bytes)", - from_layout.align.abi.bytes(), - to_layout.align.abi.bytes(), + from_layout.align.bytes(), + to_layout.align.bytes(), ), ); } diff --git a/src/tools/clippy/clippy_lints/src/casts/manual_dangling_ptr.rs b/src/tools/clippy/clippy_lints/src/casts/manual_dangling_ptr.rs index 92910cf8adf5d..ff5320719aa28 100644 --- a/src/tools/clippy/clippy_lints/src/casts/manual_dangling_ptr.rs +++ b/src/tools/clippy/clippy_lints/src/casts/manual_dangling_ptr.rs @@ -72,7 +72,7 @@ fn is_literal_aligned(cx: &LateContext<'_>, lit: &Spanned, to: &Ty<'_>) cx.tcx .layout_of(cx.typing_env().as_query_input(to_mid_ty)) .is_ok_and(|layout| { - let align = u128::from(layout.align.abi.bytes()); + let align = u128::from(layout.align.bytes()); u128::from(val) <= align }) } diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs index d307636e782fc..412640a112c09 100644 --- a/src/tools/miri/src/machine.rs +++ b/src/tools/miri/src/machine.rs @@ -1341,7 +1341,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { name = ecx.tcx.def_path_str(def_id), krate = ecx.tcx.crate_name(def_id.krate), decl_size = extern_decl_layout.size.bytes(), - decl_align = extern_decl_layout.align.abi.bytes(), + decl_align = extern_decl_layout.align.bytes(), shim_size = info.size.bytes(), shim_align = info.align.bytes(), ) diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs index 523ddad94666b..8be5eaca63b80 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs +++ 
b/src/tools/rust-analyzer/crates/hir-ty/src/layout/tests.rs @@ -150,7 +150,7 @@ fn check_size_and_align( ) { let l = eval_goal(ra_fixture, minicore).unwrap(); assert_eq!(l.size.bytes(), size, "size mismatch"); - assert_eq!(l.align.abi.bytes(), align, "align mismatch"); + assert_eq!(l.align.bytes(), align, "align mismatch"); } #[track_caller] @@ -162,7 +162,7 @@ fn check_size_and_align_expr( ) { let l = eval_expr(ra_fixture, minicore).unwrap(); assert_eq!(l.size.bytes(), size, "size mismatch"); - assert_eq!(l.align.abi.bytes(), align, "align mismatch"); + assert_eq!(l.align.bytes(), align, "align mismatch"); } #[track_caller] diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs index 3e658cb93ed8a..fc7d97fff4659 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval.rs @@ -2085,7 +2085,7 @@ impl<'db> Evaluator<'db> { if let Some(layout) = self.layout_cache.borrow().get(&ty.to_nextsolver(interner)) { return Ok(layout .is_sized() - .then(|| (layout.size.bytes_usize(), layout.align.abi.bytes() as usize))); + .then(|| (layout.size.bytes_usize(), layout.align.bytes() as usize))); } if let DefWithBodyId::VariantId(f) = locals.body.owner && let Some((AdtId::EnumId(e), _)) = ty.as_adt() @@ -2104,7 +2104,7 @@ impl<'db> Evaluator<'db> { let layout = layout?; Ok(layout .is_sized() - .then(|| (layout.size.bytes_usize(), layout.align.abi.bytes() as usize))) + .then(|| (layout.size.bytes_usize(), layout.align.bytes() as usize))) } /// A version of `self.size_of` which returns error if the type is unsized. `what` argument should @@ -2797,7 +2797,7 @@ impl<'db> Evaluator<'db> { )?; // FIXME: there is some leak here let size = layout.size.bytes_usize(); - let addr = self.heap_allocate(size, layout.align.abi.bytes() as usize)?; + let addr = self.heap_allocate(size, layout.align.bytes() as usize)?; self.write_memory(addr, &result)?; IntervalAndTy { interval: Interval { addr, size }, ty } }; diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs index f67778b0f12f3..38480493048ec 100644 --- a/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs +++ b/src/tools/rust-analyzer/crates/hir-ty/src/mir/eval/shim.rs @@ -767,7 +767,7 @@ impl Evaluator<'_> { "align_of generic arg is not provided".into(), )); }; - let align = self.layout(ty.to_nextsolver(interner))?.align.abi.bytes(); + let align = self.layout(ty.to_nextsolver(interner))?.align.bytes(); destination.write_from_bytes(self, &align.to_le_bytes()[0..destination.size]) } "size_of_val" => { @@ -1431,7 +1431,7 @@ impl Evaluator<'_> { field_types.iter().next_back().unwrap().1.clone().substitute(Interner, subst); let sized_part_size = layout.fields.offset(field_types.iter().count() - 1).bytes_usize(); - let sized_part_align = layout.align.abi.bytes() as usize; + let sized_part_align = layout.align.bytes() as usize; let (unsized_part_size, unsized_part_align) = self.size_align_of_unsized(&last_field_ty, metadata, locals)?; let align = sized_part_align.max(unsized_part_align) as isize; diff --git a/src/tools/rust-analyzer/crates/hir/src/lib.rs b/src/tools/rust-analyzer/crates/hir/src/lib.rs index 027a386abe8ce..17767955d474f 100644 --- a/src/tools/rust-analyzer/crates/hir/src/lib.rs +++ b/src/tools/rust-analyzer/crates/hir/src/lib.rs @@ -6094,7 +6094,7 @@ impl Layout { } pub fn align(&self) -> u64 { - self.0.align.abi.bytes() + 
self.0.align.bytes() } pub fn niches(&self) -> Option {