Commit 9f7f3d6
[CIR] Implemented get/set for volatile bitfields (#151875)
This PR adds support for loading and storing volatile bit-field members according to the AAPCS specification.

> A volatile bit-field must always be accessed using an access width appropriate to the type of its container, except when any of the following are true:
>
> * The bit-field container overlaps with a zero-length bit-field.
> * The bit-field container overlaps with a non-bit-field member.

For example, if a bit-field is declared as `int`, the load/store must use a 32-bit access, even if the field itself is only 3 bits wide.
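To make the quoted rule concrete, here is a minimal, hypothetical C/C++ example (not part of this patch; the struct and function names are invented): the field `f` occupies only 3 bits, but because its container type is `int`, AAPCS requires every volatile access to use the full 32-bit container width.

```cpp
// Illustrative only -- not taken from the patch or its tests.
struct S {
  volatile int f : 3; // container type is int, so the access width is 32 bits
};

int read_f(S *s) {
  return s->f; // emitted as a volatile 32-bit load, then shift/mask
}

void write_f(S *s) {
  s->f = 1; // volatile 32-bit load, merge the 3 bits, volatile 32-bit store
}
```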
1 parent f58bc72 commit 9f7f3d6

File tree: 4 files changed, +268 -31 lines

clang/lib/CIR/CodeGen/CIRGenBuilder.h

Lines changed: 23 additions & 7 deletions
@@ -410,21 +410,37 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
   mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType,
                                 Address dstAddr, mlir::Type storageType,
                                 mlir::Value src, const CIRGenBitFieldInfo &info,
-                                bool isLvalueVolatile) {
+                                bool isLvalueVolatile, bool useVolatile) {
+    unsigned offset = useVolatile ? info.volatileOffset : info.offset;
+
+    // If using AAPCS and the field is volatile, load with the size of the
+    // declared field
+    storageType =
+        useVolatile ? cir::IntType::get(storageType.getContext(),
+                                        info.volatileStorageSize, info.isSigned)
+                    : storageType;
     return create<cir::SetBitfieldOp>(
         loc, resultType, dstAddr.getPointer(), storageType, src, info.name,
-        info.size, info.offset, info.isSigned, isLvalueVolatile,
+        info.size, offset, info.isSigned, isLvalueVolatile,
         dstAddr.getAlignment().getAsAlign().value());
   }
 
   mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType,
                                 Address addr, mlir::Type storageType,
                                 const CIRGenBitFieldInfo &info,
-                                bool isLvalueVolatile) {
-    return create<cir::GetBitfieldOp>(
-        loc, resultType, addr.getPointer(), storageType, info.name, info.size,
-        info.offset, info.isSigned, isLvalueVolatile,
-        addr.getAlignment().getAsAlign().value());
+                                bool isLvalueVolatile, bool useVolatile) {
+    unsigned offset = useVolatile ? info.volatileOffset : info.offset;
+
+    // If using AAPCS and the field is volatile, load with the size of the
+    // declared field
+    storageType =
+        useVolatile ? cir::IntType::get(storageType.getContext(),
+                                        info.volatileStorageSize, info.isSigned)
+                    : storageType;
+    return create<cir::GetBitfieldOp>(loc, resultType, addr.getPointer(),
+                                      storageType, info.name, info.size, offset,
+                                      info.isSigned, isLvalueVolatile,
+                                      addr.getAlignment().getAsAlign().value());
   }
 };
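As a rough mental model of what the two helpers above now do when `useVolatile` is set, the sketch below mirrors the offset/width selection with plain integers and no MLIR types; `BitFieldInfoSketch` and `pickAccess` are illustrative names invented for this note, not part of the patch.

```cpp
#include <cstdio>

// Plain-integer stand-in for the relevant CIRGenBitFieldInfo fields.
struct BitFieldInfoSketch {
  unsigned offset;                  // bit offset within the packed storage unit
  unsigned storageSizeBits;         // width of the packed storage unit
  unsigned volatileOffset;          // bit offset within the declared container
  unsigned volatileStorageSizeBits; // width of the declared container (AAPCS)
};

// Mirrors the `useVolatile ? ... : ...` selection in createGet/SetBitfield.
void pickAccess(const BitFieldInfoSketch &info, bool useVolatile,
                unsigned &offset, unsigned &accessBits) {
  offset = useVolatile ? info.volatileOffset : info.offset;
  accessBits = useVolatile ? info.volatileStorageSizeBits : info.storageSizeBits;
}

int main() {
  // e.g. `volatile unsigned int a : 3;` packed into an 8-bit unit, but with a
  // 32-bit `unsigned int` container under AAPCS (cf. st4 in the test below).
  BitFieldInfoSketch a{0, 8, 0, 32};
  unsigned off, bits;
  pickAccess(a, /*useVolatile=*/true, off, bits);
  std::printf("offset=%u accessBits=%u\n", off, bits); // offset=0 accessBits=32
}
```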

clang/lib/CIR/CodeGen/CIRGenExpr.cpp

Lines changed: 17 additions & 9 deletions
@@ -322,22 +322,28 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
   assert(!cir::MissingFeatures::opTBAA());
 }
 
+// TODO: Replace this with a proper TargetInfo function call.
+/// Helper method to check if the underlying ABI is AAPCS
+static bool isAAPCS(const TargetInfo &targetInfo) {
+  return targetInfo.getABI().starts_with("aapcs");
+}
+
 mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src,
                                                            LValue dst) {
 
-  assert(!cir::MissingFeatures::armComputeVolatileBitfields());
-
   const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
   mlir::Type resLTy = convertTypeForMem(dst.getType());
   Address ptr = dst.getBitFieldAddress();
 
-  assert(!cir::MissingFeatures::armComputeVolatileBitfields());
+  bool useVoaltile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
+                     dst.isVolatileQualified() &&
+                     info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());
 
   mlir::Value dstAddr = dst.getAddress().getPointer();
 
   return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr,
                                    ptr.getElementType(), src.getValue(), info,
-                                   dst.isVolatileQualified());
+                                   dst.isVolatileQualified(), useVoaltile);
 }
 
 RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
@@ -347,10 +353,12 @@ RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
   mlir::Type resLTy = convertType(lv.getType());
   Address ptr = lv.getBitFieldAddress();
 
-  assert(!cir::MissingFeatures::armComputeVolatileBitfields());
+  bool useVoaltile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
+                     isAAPCS(cgm.getTarget());
 
-  mlir::Value field = builder.createGetBitfield(
-      getLoc(loc), resLTy, ptr, ptr.getElementType(), info, lv.isVolatile());
+  mlir::Value field =
+      builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
+                                info, lv.isVolatile(), useVoaltile);
   assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck() && "NYI");
   return RValue::get(field);
 }
@@ -375,10 +383,10 @@ LValue CIRGenFunction::emitLValueForBitField(LValue base,
   const CIRGenRecordLayout &layout =
       cgm.getTypes().getCIRGenRecordLayout(field->getParent());
   const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
-  assert(!cir::MissingFeatures::armComputeVolatileBitfields());
+
   assert(!cir::MissingFeatures::preservedAccessIndexRegion());
-  unsigned idx = layout.getCIRFieldNo(field);
 
+  unsigned idx = layout.getCIRFieldNo(field);
   Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);
 
   mlir::Location loc = getLoc(field->getLocation());
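For reference, the gating that decides when the wider AAPCS access is used differs slightly between the store and load paths above. The standalone sketch below restates those two conditions with plain values; the names are assumptions invented for this note, not the real Clang APIs.

```cpp
#include <string>

// Stand-in for the bit-field info fields consulted by the hunks above.
struct InfoSketch {
  unsigned volatileOffset = 0;
  unsigned volatileStorageSize = 0;
};

// Same prefix check as the isAAPCS helper added above (std::string here).
bool isAAPCSSketch(const std::string &abi) { return abi.rfind("aapcs", 0) == 0; }

// Store path: also requires the AAPCSBitfieldWidth codegen option and a
// non-zero volatile storage size.
bool useVolatileForStore(bool aapcsBitfieldWidth, bool isVolatileQualified,
                         const InfoSketch &info, const std::string &abi) {
  return aapcsBitfieldWidth && isVolatileQualified &&
         info.volatileStorageSize != 0 && isAAPCSSketch(abi);
}

// Load path: keyed off a non-zero volatile offset instead.
bool useVolatileForLoad(bool isVolatileQualified, const InfoSketch &info,
                        const std::string &abi) {
  return isVolatileQualified && info.volatileOffset != 0 && isAAPCSSketch(abi);
}
```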

clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp

Lines changed: 3 additions & 2 deletions
@@ -847,8 +847,9 @@ void CIRRecordLowering::computeVolatileBitfields() {
 
     const CharUnits fEnd =
         fOffset +
-        astContext.toCharUnitsFromBits(astContext.toBits(
-            getSizeInBits(cirGenTypes.convertTypeForMem(f->getType())))) -
+        astContext.toCharUnitsFromBits(
+            getSizeInBits(cirGenTypes.convertTypeForMem(f->getType()))
+                .getQuantity()) -
         CharUnits::One();
     // If no overlap, continue.
     if (end < fOffset || fEnd < storageOffset)
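The hunk above only changes how `fEnd` (the last byte of the neighbouring field) is computed; the overlap test it feeds is unchanged. Below is a plain-integer sketch of that test, with invented names standing in for the `CharUnits` values used in the real code.

```cpp
#include <cassert>

// True when the bit-field container [storageOffset, end] overlaps the
// neighbouring field [fOffset, fOffset + fSizeInChars - 1].
bool overlapsContainer(long storageOffset, long end, long fOffset,
                       long fSizeInChars) {
  long fEnd = fOffset + fSizeInChars - 1; // matches the fEnd computation above
  return !(end < fOffset || fEnd < storageOffset);
}

int main() {
  // A 4-byte container at bytes [0,3] overlaps a 4-byte member at offset 2 ...
  assert(overlapsContainer(0, 3, 2, 4));
  // ... but not a member that starts at offset 4.
  assert(!overlapsContainer(0, 3, 4, 4));
}
```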

clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c

Lines changed: 225 additions & 13 deletions
@@ -1,8 +1,13 @@
 // RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-cir -fdump-record-layouts %s -o %t.cir 1> %t.cirlayout
 // RUN: FileCheck --input-file=%t.cirlayout %s --check-prefix=CIR-LAYOUT
+// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR
+
+// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
 
 // RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -emit-llvm -fdump-record-layouts %s -o %t.ll 1> %t.ogcglayout
 // RUN: FileCheck --input-file=%t.ogcglayout %s --check-prefix=OGCG-LAYOUT
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
 
 typedef struct {
   unsigned int a : 9;
@@ -53,21 +58,228 @@ typedef struct{
 
 typedef struct{
   volatile unsigned int a : 3;
-  unsigned int z: 2;
-  volatile unsigned int b : 5;
+  unsigned int z;
+  volatile unsigned long b : 16;
 } st4;
 
 // CIR-LAYOUT: BitFields:[
-// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:a offset:0 size:3 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:0 volatileStorageSize:32 volatileStorageOffset:0>
-// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:z offset:3 size:2 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:3 volatileStorageSize:32 volatileStorageOffset:0>
-// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:b offset:5 size:5 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:5 volatileStorageSize:32 volatileStorageOffset:0>
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:a offset:0 size:3 isSigned:0 storageSize:8 storageOffset:0 volatileOffset:0 volatileStorageSize:32 volatileStorageOffset:0>
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:b offset:0 size:16 isSigned:0 storageSize:16 storageOffset:8 volatileOffset:0 volatileStorageSize:64 volatileStorageOffset:1>
 
 // OGCG-LAYOUT: BitFields:[
-// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:0 VolatileStorageSize:32 VolatileStorageOffset:0>
-// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:3 Size:2 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:3 VolatileStorageSize:32 VolatileStorageOffset:0>
-// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:5 Size:5 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:5 VolatileStorageSize:32 VolatileStorageOffset:0>
-
-st1 s1;
-st2 s2;
-st3 s3;
-st4 s4;
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0 VolatileOffset:0 VolatileStorageSize:32 VolatileStorageOffset:0>
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:16 IsSigned:0 StorageSize:16 StorageOffset:8 VolatileOffset:0 VolatileStorageSize:64 VolatileStorageOffset:1>
+
+
+void def () {
+  st1 s1;
+  st2 s2;
+  st3 s3;
+  st4 s4;
+}
+
+int check_load(st1 *s1) {
+  return s1->b;
+}
+
+// CIR: cir.func dso_local @check_load
+// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st1>>, !cir.ptr<!rec_st1>
+// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "b"} : !cir.ptr<!rec_st1> -> !cir.ptr<!u16i>
+// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b, [[MEMBER]] {is_volatile} : !cir.ptr<!u16i>) -> !u32i
+// CIR: [[CAST:%.*]] = cir.cast(integral, [[BITFI]] : !u32i), !s32i
+// CIR: cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
+// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return [[RET]] : !s32i
+
+// LLVM:define dso_local i32 @check_load
+// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st1, ptr [[LOAD]], i32 0, i32 0
+// LLVM: [[LOADVOL:%.*]] = load volatile i32, ptr [[MEMBER]], align 4
+// LLVM: [[LSHR:%.*]] = lshr i32 [[LOADVOL]], 9
+// LLVM: [[CLEAR:%.*]] = and i32 [[LSHR]], 1
+// LLVM: store i32 [[CLEAR]], ptr [[RETVAL:%.*]], align 4
+// LLVM: [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4
+// LLVM: ret i32 [[RET]]
+
+// OGCG: define dso_local i32 @check_load
+// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG: [[LOADVOL:%.*]] = load volatile i32, ptr [[LOAD]], align 4
+// OGCG: [[LSHR:%.*]] = lshr i32 [[LOADVOL]], 9
+// OGCG: [[CLEAR:%.*]] = and i32 [[LSHR]], 1
+// OGCG: ret i32 [[CLEAR]]
+
+// this volatile bit-field container overlaps with a zero-length bit-field,
+// so it may be accessed without using the container's width.
+int check_load_exception(st3 *s3) {
+  return s3->b;
+}
+
+// CIR: cir.func dso_local @check_load_exception
+// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st3>>, !cir.ptr<!rec_st3>
+// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st3> -> !cir.ptr<!u8i>
+// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b1, [[MEMBER]] {is_volatile} : !cir.ptr<!u8i>) -> !u32i
+// CIR: [[CAST:%.*]] = cir.cast(integral, [[BITFI]] : !u32i), !s32i
+// CIR: cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
+// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return [[RET]] : !s32i
+
+// LLVM:define dso_local i32 @check_load_exception
+// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st3, ptr [[LOAD]], i32 0, i32 2
+// LLVM: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
+// LLVM: [[CLEAR:%.*]] = and i8 [[LOADVOL]], 31
+// LLVM: [[CAST:%.*]] = zext i8 [[CLEAR]] to i32
+// LLVM: store i32 [[CAST]], ptr [[RETVAL:%.*]], align 4
+// LLVM: [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4
+// LLVM: ret i32 [[RET]]
+
+// OGCG: define dso_local i32 @check_load_exception
+// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG: [[MEMBER:%.*]] = getelementptr inbounds nuw %struct.st3, ptr [[LOAD]], i32 0, i32 2
+// OGCG: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
+// OGCG: [[CLEAR:%.*]] = and i8 [[LOADVOL]], 31
+// OGCG: [[CAST:%.*]] = zext i8 [[CLEAR]] to i32
+// OGCG: ret i32 [[CAST]]
+
+typedef struct {
+  volatile int a : 24;
+  char b;
+  volatile int c: 30;
+} clip;
+
+int clip_load_exception2(clip *c) {
+  return c->a;
+}
+
+// CIR: cir.func dso_local @clip_load_exception2
+// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_clip>>, !cir.ptr<!rec_clip>
+// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr<!rec_clip> -> !cir.ptr<!cir.array<!u8i x 3>>
+// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_a1, [[MEMBER]] {is_volatile} : !cir.ptr<!cir.array<!u8i x 3>>) -> !s32i
+// CIR: cir.store [[BITFI]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
+// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return [[RET]] : !s32i
+
+// LLVM:define dso_local i32 @clip_load_exception2
+// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM: [[MEMBER:%.*]] = getelementptr %struct.clip, ptr [[LOAD]], i32 0, i32 0
+// LLVM: [[LOADVOL:%.*]] = load volatile i24, ptr [[MEMBER]], align 4
+// LLVM: [[CAST:%.*]] = sext i24 [[LOADVOL]] to i32
+// LLVM: store i32 [[CAST]], ptr [[RETVAL:%.*]], align 4
+// LLVM: [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4
+// LLVM: ret i32 [[RET]]
+
+// OGCG: define dso_local i32 @clip_load_exception2
+// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG: [[LOADVOL:%.*]] = load volatile i24, ptr [[LOAD]], align 4
+// OGCG: [[CAST:%.*]] = sext i24 [[LOADVOL]] to i32
+// OGCG: ret i32 [[CAST]]
+
+void check_store(st2 *s2) {
+  s2->a = 1;
+}
+
+// CIR: cir.func dso_local @check_store
+// CIR: [[CONST:%.*]] = cir.const #cir.int<1> : !s32i
+// CIR: [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !s16i
+// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st2>>, !cir.ptr<!rec_st2>
+// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr<!rec_st2> -> !cir.ptr<!u32i>
+// CIR: [[SETBF:%.*]] = cir.set_bitfield align(8) (#bfi_a, [[MEMBER]] : !cir.ptr<!u32i>, [[CAST]] : !s16i) {is_volatile} -> !s16i
+// CIR: cir.return
+
+// LLVM:define dso_local void @check_store
+// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st2, ptr [[LOAD]], i32 0, i32 0
+// LLVM: [[LOADVOL:%.*]] = load volatile i16, ptr [[MEMBER]], align 8
+// LLVM: [[CLEAR:%.*]] = and i16 [[LOADVOL]], -8
+// LLVM: [[SET:%.*]] = or i16 [[CLEAR]], 1
+// LLVM: store volatile i16 [[SET]], ptr [[MEMBER]], align 8
+// LLVM: ret void
+
+// OGCG: define dso_local void @check_store
+// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG: [[LOADVOL:%.*]] = load volatile i16, ptr [[LOAD]], align 8
+// OGCG: [[CLEAR:%.*]] = and i16 [[LOADVOL]], -8
+// OGCG: [[SET:%.*]] = or i16 [[CLEAR]], 1
+// OGCG: store volatile i16 [[SET]], ptr [[LOAD]], align 8
+// OGCG: ret void
+
+// this volatile bit-field container overlaps with a zero-length bit-field,
+// so it may be accessed without using the container's width.
+void check_store_exception(st3 *s3) {
+  s3->b = 2;
+}
+
+// CIR: cir.func dso_local @check_store_exception
+// CIR: [[CONST:%.*]] = cir.const #cir.int<2> : !s32i
+// CIR: [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !u32i
+// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st3>>, !cir.ptr<!rec_st3>
+// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st3> -> !cir.ptr<!u8i>
+// CIR: [[SETBF:%.*]] = cir.set_bitfield align(4) (#bfi_b1, [[MEMBER]] : !cir.ptr<!u8i>, [[CAST]] : !u32i) {is_volatile} -> !u32i
+// CIR: cir.return
+
+// LLVM:define dso_local void @check_store_exception
+// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st3, ptr [[LOAD]], i32 0, i32 2
+// LLVM: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
+// LLVM: [[CLEAR:%.*]] = and i8 [[LOADVOL]], -32
+// LLVM: [[SET:%.*]] = or i8 [[CLEAR]], 2
+// LLVM: store volatile i8 [[SET]], ptr [[MEMBER]], align 4
+// LLVM: ret void
+
+// OGCG: define dso_local void @check_store_exception
+// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG: [[MEMBER:%.*]] = getelementptr inbounds nuw %struct.st3, ptr [[LOAD]], i32 0, i32 2
+// OGCG: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
+// OGCG: [[CLEAR:%.*]] = and i8 [[LOADVOL]], -32
+// OGCG: [[SET:%.*]] = or i8 [[CLEAR]], 2
+// OGCG: store volatile i8 [[SET]], ptr [[MEMBER]], align 4
+// OGCG: ret void
+
+void clip_store_exception2(clip *c) {
+  c->a = 3;
+}
+
+// CIR: cir.func dso_local @clip_store_exception2
+// CIR: [[CONST:%.*]] = cir.const #cir.int<3> : !s32i
+// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_clip>>, !cir.ptr<!rec_clip>
+// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr<!rec_clip> -> !cir.ptr<!cir.array<!u8i x 3>>
+// CIR: [[SETBF:%.*]] = cir.set_bitfield align(4) (#bfi_a1, [[MEMBER]] : !cir.ptr<!cir.array<!u8i x 3>>, [[CONST]] : !s32i) {is_volatile} -> !s32i
+// CIR: cir.return
+
+// LLVM:define dso_local void @clip_store_exception2
+// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM: [[MEMBER:%.*]] = getelementptr %struct.clip, ptr [[LOAD]], i32 0, i32 0
+// LLVM: store volatile i24 3, ptr [[MEMBER]], align 4
+// LLVM: ret void
+
+// OGCG: define dso_local void @clip_store_exception2
+// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG: store volatile i24 3, ptr [[LOAD]], align 4
+// OGCG: ret void
+
+void check_store_second_member (st4 *s4) {
+  s4->b = 1;
+}
+
+// CIR: cir.func dso_local @check_store_second_member
+// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i
+// CIR: [[CAST:%.*]] = cir.cast(integral, [[ONE]] : !s32i), !u64i
+// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st4>>, !cir.ptr<!rec_st4>
+// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st4> -> !cir.ptr<!u16i>
+// CIR: cir.set_bitfield align(8) (#bfi_b2, [[MEMBER]] : !cir.ptr<!u16i>, [[CAST]] : !u64i) {is_volatile} -> !u64i
+
+// LLVM: define dso_local void @check_store_second_member
+// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st4, ptr [[LOAD]], i32 0, i32 2
+// LLVM: [[VAL:%.*]] = load volatile i64, ptr [[MEMBER]], align 8
+// LLVM: [[CLEAR:%.*]] = and i64 [[VAL]], -65536
+// LLVM: [[SET:%.*]] = or i64 [[CLEAR]], 1
+// LLVM: store volatile i64 [[SET]], ptr [[MEMBER]], align 8
+
+// OGCG: define dso_local void @check_store_second_member
+// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
+// OGCG: [[MEMBER:%.*]] = getelementptr inbounds i64, ptr [[LOAD]], i64 1
+// OGCG: [[LOADBF:%.*]] = load volatile i64, ptr [[MEMBER]], align 8
+// OGCG: [[CLR:%.*]] = and i64 [[LOADBF]], -65536
+// OGCG: [[SET:%.*]] = or i64 [[CLR]], 1
+// OGCG: store volatile i64 [[SET]], ptr [[MEMBER]], align 8
