@@ -91,6 +91,9 @@ struct CIRRecordLowering final {
9191 return astContext.getTargetInfo ().getABI ().starts_with (" aapcs" );
9292 }
9393
94+ // / Helper function to check if the target machine is BigEndian.
95+ bool isBigEndian () const { return astContext.getTargetInfo ().isBigEndian (); }
96+
9497 CharUnits bitsToCharUnits (uint64_t bitOffset) {
9598 return astContext.toCharUnitsFromBits (bitOffset);
9699 }
@@ -771,7 +774,104 @@ void CIRRecordLowering::computeVolatileBitfields() {
771774 !cirGenTypes.getCGModule ().getCodeGenOpts ().AAPCSBitfieldWidth )
772775 return ;
773776
774- assert (!cir::MissingFeatures::armComputeVolatileBitfields ());
  // For each bit-field, try to widen its access to the AAPCS "container"
  // (a naturally aligned load/store of the bit-field's declared type) and
  // record the result in the volatile* members of the bit-field info.
  for (auto &[field, info] : bitFields) {
    mlir::Type resLTy = cirGenTypes.convertTypeForMem(field->getType());

    // Skip if the record is less aligned than the bit-field's type: an
    // aligned container-wide access could not be guaranteed.
    if (astContext.toBits(astRecordLayout.getAlignment()) <
        getSizeInBits(resLTy).getQuantity())
      continue;

    // CIRRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
    // for big-endian targets, but it assumes a container of width
    // info.storageSize. Since AAPCS uses a different container size (width
    // of the type), we first undo that calculation here and redo it once
    // the bit-field offset within the new container is calculated.
    const unsigned oldOffset =
        isBigEndian() ? info.storageSize - (info.offset + info.size)
                      : info.offset;
    // Offset to the bit-field from the beginning of the struct.
    const unsigned absoluteOffset =
        astContext.toBits(info.storageOffset) + oldOffset;

    // Container size is the width of the bit-field type.
    const unsigned storageSize = getSizeInBits(resLTy).getQuantity();
    // Nothing to do if the access already uses the desired
    // container width and is naturally aligned.
    if (info.storageSize == storageSize && (oldOffset % storageSize == 0))
      continue;

    // Offset of the bit-field within the (naturally aligned) container.
    // NOTE: storageSize is assumed to be a power of two here, so the
    // mask arithmetic below is equivalent to absoluteOffset % storageSize.
    unsigned offset = absoluteOffset & (storageSize - 1);
    // Bail out if an aligned load of the container cannot cover the entire
    // bit-field. This can happen, for example, if the bit-field is part of a
    // packed struct. AAPCS does not define access rules for such cases; we
    // let clang follow its own rules.
    if (offset + info.size > storageSize)
      continue;

    // Re-adjust offsets for big-endian targets (redoing what was undone
    // above, now relative to the new container width).
    if (isBigEndian())
      offset = storageSize - (offset + info.size);

    // Byte offset of the container from the start of the struct, and the
    // last byte the container access would touch.
    const CharUnits storageOffset =
        astContext.toCharUnitsFromBits(absoluteOffset & ~(storageSize - 1));
    const CharUnits end = storageOffset +
                          astContext.toCharUnitsFromBits(storageSize) -
                          CharUnits::One();

    const ASTRecordLayout &layout =
        astContext.getASTRecordLayout(field->getParent());
    // If the widened access would touch memory outside the record, bail out.
    const CharUnits recordSize = layout.getSize();
    if (end >= recordSize)
      continue;

    // Bail out if performing this load would access non-bit-field members.
    bool conflict = false;
    for (const auto *f : recordDecl->fields()) {
      // Allow overlap with other sized (non-zero-length) bit-fields.
      if (f->isBitField() && !f->isZeroLengthBitField())
        continue;

      const CharUnits fOffset = astContext.toCharUnitsFromBits(
          layout.getFieldOffset(f->getFieldIndex()));

      // As C11 defines, a zero sized bit-field defines a barrier, so
      // fields after and before it should be race condition free.
      // The AAPCS acknowledges it and imposes no restrictions when the
      // natural container overlaps a zero-length bit-field.
      // NOTE(review): a zero-length bit-field also falls through to the
      // generic overlap check below, using the size of its declared type —
      // this mirrors clang CodeGen's computeVolatileBitfields; confirm
      // intended.
      if (f->isZeroLengthBitField()) {
        if (end > fOffset && storageOffset < fOffset) {
          conflict = true;
          break;
        }
      }

      // Last byte occupied by this (non-bit-field or zero-length) member,
      // based on the memory size of its converted type.
      const CharUnits fEnd =
          fOffset +
          astContext.toCharUnitsFromBits(astContext.toBits(
              getSizeInBits(cirGenTypes.convertTypeForMem(f->getType())))) -
          CharUnits::One();
      // If the container access and this member do not overlap, continue.
      if (end < fOffset || fEnd < storageOffset)
        continue;

      // The desired load overlaps a non-bit-field member, bail out.
      conflict = true;
      break;
    }

    if (conflict)
      continue;
    // Write the new bit-field access parameters.
    // As the storage offset now is defined as the number of elements from the
    // start of the structure, we should divide the offset by the element
    // size (the container width in bytes).
    info.volatileStorageOffset =
        storageOffset /
        astContext.toCharUnitsFromBits(storageSize).getQuantity();
    info.volatileStorageSize = storageSize;
    info.volatileOffset = offset;
  }
775875}
776876
777877void CIRRecordLowering::accumulateBases (const CXXRecordDecl *cxxRecordDecl) {
0 commit comments