@@ -16,7 +16,7 @@ use rustc_target::abi::{HasDataLayout, Size, VariantIdx, Variants};
 use super::{
     alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
     ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy,
-    Operand, Pointer, PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
+    Operand, Pointer, Provenance, Scalar, ScalarMaybeUninit,
 };
 
 #[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
@@ -700,24 +700,7 @@ where
         src: Immediate<M::PointerTag>,
         dest: &PlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx> {
-        if cfg!(debug_assertions) {
-            // This is a very common path, avoid some checks in release mode
-            assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
-            match src {
-                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(..))) => assert_eq!(
-                    self.pointer_size(),
-                    dest.layout.size,
-                    "Size mismatch when writing pointer"
-                ),
-                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Int(int))) => {
-                    assert_eq!(int.size(), dest.layout.size, "Size mismatch when writing bits")
-                }
-                Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size
-                Immediate::ScalarPair(_, _) => {
-                    // FIXME: Can we check anything here?
-                }
-            }
-        }
+        assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
 
         trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
 
         // See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
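For context on the block being removed: `cfg!(debug_assertions)` expands to a plain boolean constant, so these size checks compiled to a branch that folds away entirely in release builds. Note also that `self.pointer_size()` was the only use of the `PointerArithmetic` trait in this file, which is why its import is dropped in the first hunk. A minimal standalone sketch of the removed pattern, using simplified stand-in types rather than rustc's `Immediate`/`Scalar`:

    // Simplified stand-in for the interpreter's immediate values.
    enum Val {
        Int { size: u64 }, // an integer scalar with a known byte size
        Ptr,               // a pointer scalar, always pointer-sized
        Uninit,            // uninitialized data can have any size
    }

    fn debug_check_write(v: &Val, dest_size: u64, ptr_size: u64) {
        // A very common path, so the checks only run in debug builds; in
        // release builds the whole `if` body is dead code.
        if cfg!(debug_assertions) {
            match v {
                Val::Ptr => {
                    assert_eq!(ptr_size, dest_size, "Size mismatch when writing pointer")
                }
                Val::Int { size } => {
                    assert_eq!(*size, dest_size, "Size mismatch when writing bits")
                }
                Val::Uninit => {} // uninit can have any size
            }
        }
    }

    fn main() {
        debug_check_write(&Val::Int { size: 4 }, 4, 8); // passes the debug check
    }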
@@ -753,31 +736,27 @@ where
         dest: &MPlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx> {
         // Note that it is really important that the type here is the right one, and matches the
-        // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
+        // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
         // to handle padding properly, which is only correct if we never look at this data with the
         // wrong type.
 
-        // Invalid places are a thing: the return place of a diverging function
         let tcx = *self.tcx;
         let Some(mut alloc) = self.get_place_alloc_mut(dest)? else {
             // zero-sized access
             return Ok(());
         };
 
-        // FIXME: We should check that there are dest.layout.size many bytes available in
-        // memory. The code below is not sufficient, with enough padding it might not
-        // cover all the bytes!
         match value {
             Immediate::Scalar(scalar) => {
-                match dest.layout.abi {
-                    Abi::Scalar(_) => {} // fine
-                    _ => span_bug!(
+                let Abi::Scalar(s) = dest.layout.abi else { span_bug!(
                         self.cur_span(),
                         "write_immediate_to_mplace: invalid Scalar layout: {:#?}",
                         dest.layout
-                    ),
-                }
-                alloc.write_scalar(alloc_range(Size::ZERO, dest.layout.size), scalar)
+                    )
+                };
+                let size = s.size(&tcx);
+                //FIXME(#96185): assert_eq!(dest.layout.size, size, "abi::Scalar size does not match layout size");
+                alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
             }
             Immediate::ScalarPair(a_val, b_val) => {
                 // We checked `ptr_align` above, so all fields will have the alignment they need.
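The `Immediate::Scalar` arm now uses `let ... else`: the pattern either binds the scalar layout or the `else` block must diverge (here via `span_bug!`), and the write size comes from the `Abi::Scalar` itself rather than `dest.layout.size`. A standalone sketch of the same shape, with a simplified `Abi` stand-in rather than rustc's `rustc_target::abi::Abi`:

    // Simplified stand-in for the layout ABI; only the scalar case carries
    // a size here.
    #[derive(Debug)]
    enum Abi {
        Scalar { size: u64 },
        Aggregate,
    }

    fn scalar_write_size(abi: &Abi) -> u64 {
        // `let ... else` requires the else block to diverge; the real code
        // diverges with `span_bug!` instead of `panic!`.
        let Abi::Scalar { size } = abi else {
            panic!("invalid Scalar layout: {:?}", abi);
        };
        // Use the size recorded in the scalar ABI, which the FIXME(#96185)
        // note says may differ from the layout size.
        *size
    }

    fn main() {
        assert_eq!(scalar_write_size(&Abi::Scalar { size: 4 }), 4);
    }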
@@ -791,6 +770,7 @@ where
                 };
                 let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
                 let b_offset = a_size.align_to(b.align(&tcx).abi);
+                assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
 
                 // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                 // but that does not work: We could be a newtype around a pair, then the
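For context on the new assertion: `b_offset` is `a`'s size rounded up to `b`'s alignment, and since `a` is a scalar its size is never zero, so the second field's offset is always nonzero and can be used to tell the two fields apart. A standalone sketch of the rounding arithmetic, using a hypothetical `align_to` helper over plain integers rather than rustc's `Size`/`Align` types:

    // Round `size` up to the next multiple of `align`; `align` must be a
    // power of two, as alignments always are. This mirrors `Size::align_to`.
    fn align_to(size: u64, align: u64) -> u64 {
        assert!(align.is_power_of_two());
        (size + align - 1) & !(align - 1)
    }

    fn main() {
        // A pair laid out like (u8, u32): the second field starts at byte 4.
        let b_offset = align_to(1, 4);
        assert_eq!(b_offset, 4);

        // Only a zero-sized first field would give offset 0, and scalars are
        // never zero-sized, which is what the new assert! relies on.
        assert_eq!(align_to(0, 4), 0);
    }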