Skip to content

Commit 9f9e15f

Browse files
authored
[CIR] Upstream SizeOf for VariableArrayType (#169993)
Upstream SizeOf support for VariableArrayType
1 parent c752bb9 commit 9f9e15f

File tree

2 files changed

+174
-13
lines changed

2 files changed

+174
-13
lines changed

clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp

Lines changed: 18 additions & 13 deletions
Original file line number · Diff line number · Diff line change
@@ -2344,25 +2344,30 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
23442344
} else {
23452345
// C99 6.5.3.4p2: If the argument is an expression of type
23462346
// VLA, it is evaluated.
2347-
cgf.getCIRGenModule().errorNYI(
2348-
e->getSourceRange(),
2349-
"sizeof operator for VariableArrayType & evaluateExtent "
2350-
"ignoredExpr",
2351-
e->getStmtClassName());
2352-
return {};
2347+
cgf.emitIgnoredExpr(e->getArgumentExpr());
23532348
}
23542349

23552350
// For _Countof, we just want to return the size of a single dimension.
23562351
if (kind == UETT_CountOf)
23572352
return cgf.getVLAElements1D(vat).numElts;
23582353

2359-
cgf.getCIRGenModule().errorNYI(
2360-
e->getSourceRange(),
2361-
"sizeof operator for VariableArrayType & evaluateExtent",
2362-
e->getStmtClassName());
2363-
return builder.getConstant(
2364-
loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2365-
-llvm::APSInt(llvm::APInt(64, 1), true)));
2354+
// For sizeof and __datasizeof, we need to scale the number of elements
2355+
// by the size of the array element type.
2356+
CIRGenFunction::VlaSizePair vlaSize = cgf.getVLASize(vat);
2357+
mlir::Value numElts = vlaSize.numElts;
2358+
2359+
// Scale the number of non-VLA elements by the non-VLA element size.
2360+
CharUnits eltSize = cgf.getContext().getTypeSizeInChars(vlaSize.type);
2361+
if (!eltSize.isOne()) {
2362+
mlir::Location loc = cgf.getLoc(e->getSourceRange());
2363+
mlir::Value eltSizeValue =
2364+
builder.getConstAPInt(numElts.getLoc(), numElts.getType(),
2365+
cgf.cgm.getSize(eltSize).getValue());
2366+
return builder.createMul(loc, eltSizeValue, numElts,
2367+
cir::OverflowBehavior::NoUnsignedWrap);
2368+
}
2369+
2370+
return numElts;
23662371
}
23672372
}
23682373
} else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
Lines changed: 156 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -0,0 +1,156 @@
1+
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
2+
// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
3+
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
4+
// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
5+
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
6+
// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
7+
8+
void vla_type_with_element_type_of_size_1() {
9+
unsigned long n = 10ul;
10+
unsigned long size = sizeof(bool[n]);
11+
}
12+
13+
// CIR: %[[N_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["n", init]
14+
// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["size", init]
15+
// CIR: %[[CONST_10:.*]] = cir.const #cir.int<10> : !u64i
16+
// CIR: cir.store {{.*}} %[[CONST_10]], %[[N_ADDR]] : !u64i, !cir.ptr<!u64i>
17+
// CIR: %[[TMP_N:.*]] = cir.load {{.*}} %[[N_ADDR]] : !cir.ptr<!u64i>, !u64i
18+
// CIR: cir.store {{.*}} %[[TMP_N]], %[[SIZE_ADDR]] : !u64i, !cir.ptr<!u64i>
19+
20+
// LLVM: %[[N_ADDR:.*]] = alloca i64, i64 1, align 8
21+
// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8
22+
// LLVM: store i64 10, ptr %[[N_ADDR]], align 8
23+
// LLVM: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
24+
// LLVM: store i64 %[[TMP_N]], ptr %[[SIZE_ADDR]], align 8
25+
26+
// OGCG: %[[N_ADDR:.*]] = alloca i64, align 8
27+
// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, align 8
28+
// OGCG: store i64 10, ptr %[[N_ADDR]], align 8
29+
// OGCG: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
30+
// OGCG: store i64 %[[TMP_N]], ptr %[[SIZE_ADDR]], align 8
31+
32+
void vla_type_with_element_type_int() {
33+
unsigned long n = 10ul;
34+
unsigned long size = sizeof(int[n]);
35+
}
36+
37+
// CIR: %[[N_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["n", init]
38+
// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["size", init]
39+
// CIR: %[[CONST_10:.*]] = cir.const #cir.int<10> : !u64i
40+
// CIR: cir.store {{.*}} %[[CONST_10]], %[[N_ADDR]] : !u64i, !cir.ptr<!u64i>
41+
// CIR: %3 = cir.load {{.*}} %[[N_ADDR]] : !cir.ptr<!u64i>, !u64i
42+
// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !u64i
43+
// CIR: %[[SIZE:.*]] = cir.binop(mul, %[[CONST_4]], %3) nuw : !u64i
44+
// CIR: cir.store {{.*}} %[[SIZE]], %[[SIZE_ADDR]] : !u64i, !cir.ptr<!u64i>
45+
46+
// LLVM: %[[N_ADDR:.*]] = alloca i64, i64 1, align 8
47+
// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8
48+
// LLVM: store i64 10, ptr %[[N_ADDR]], align 8
49+
// LLVM: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
50+
// LLVM: %[[SIZE:.*]] = mul nuw i64 4, %[[TMP_N]]
51+
// LLVM: store i64 %[[SIZE]], ptr %[[SIZE_ADDR]], align 8
52+
53+
// OGCG: %[[N_ADDR:.*]] = alloca i64, align 8
54+
// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, align 8
55+
// OGCG: store i64 10, ptr %[[N_ADDR]], align 8
56+
// OGCG: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
57+
// OGCG: %[[SIZE:.*]] = mul nuw i64 4, %[[TMP_N]]
58+
// OGCG: store i64 %[[SIZE]], ptr %[[SIZE_ADDR]], align 8
59+
60+
void vla_expr_element_type_of_size_1() {
61+
unsigned long n = 10ul;
62+
bool arr[n];
63+
unsigned long size = sizeof(arr);
64+
}
65+
66+
// CIR: %[[N_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["n", init]
67+
// CIR: %[[SAVED_STACK_ADDR:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
68+
// CIR: %[[CONST_10:.*]] = cir.const #cir.int<10> : !u64i
69+
// CIR: cir.store {{.*}} %[[CONST_10]], %[[N_ADDR]] : !u64i, !cir.ptr<!u64i>
70+
// CIR: %[[TMP_N:.*]] = cir.load {{.*}} %[[N_ADDR]] : !cir.ptr<!u64i>, !u64i
71+
// CIR: %[[STACK_SAVE:.*]] = cir.stacksave : !cir.ptr<!u8i>
72+
// CIR: cir.store {{.*}} %[[STACK_SAVE]], %[[SAVED_STACK_ADDR]] : !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>
73+
// CIR: %[[ARR_ADDR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, %[[TMP_N]] : !u64i, ["arr"]
74+
// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["size", init]
75+
// CIR: cir.store {{.*}} %[[TMP_N]], %[[SIZE_ADDR]] : !u64i, !cir.ptr<!u64i>
76+
// CIR: %[[TMP_SAVED_STACK:.*]] = cir.load {{.*}} %[[SAVED_STACK_ADDR]] : !cir.ptr<!cir.ptr<!u8i>>, !cir.ptr<!u8i>
77+
// CIR: cir.stackrestore %[[TMP_SAVED_STACK]] : !cir.ptr<!u8i>
78+
79+
// LLVM: %[[N_ADDR:.*]] = alloca i64, i64 1, align 8
80+
// LLVM: %[[SAVED_STACK_ADDR:.*]] = alloca ptr, i64 1, align 8
81+
// LLVM: store i64 10, ptr %[[N_ADDR]], align 8
82+
// LLVM: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
83+
// LLVM: %[[STACK_SAVE:.*]] = call ptr @llvm.stacksave.p0()
84+
// LLVM: store ptr %[[STACK_SAVE]], ptr %[[SAVED_STACK_ADDR]], align 8
85+
// LLVM: %[[ARR_ADDR:.*]] = alloca i8, i64 %[[TMP_N]], align 16
86+
// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8
87+
// LLVM: store i64 %[[TMP_N]], ptr %[[SIZE_ADDR]], align 8
88+
// LLVM: %[[TMP_SAVED_STACK:.*]] = load ptr, ptr %[[SAVED_STACK_ADDR]], align 8
89+
// LLVM: call void @llvm.stackrestore.p0(ptr %[[TMP_SAVED_STACK]])
90+
91+
// Note: VLA_EXPR0 below is emitted to capture debug info.
92+
93+
// OGCG: %[[N_ADDR:.*]] = alloca i64, align 8
94+
// OGCG: %[[SAVED_STACK_ADDR:.*]] = alloca ptr, align 8
95+
// OGCG: %[[VLA_EXPR0:.*]] = alloca i64, align 8
96+
// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, align 8
97+
// OGCG: store i64 10, ptr %[[N_ADDR]], align 8
98+
// OGCG: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
99+
// OGCG: %[[STACK_SAVE:.*]] = call ptr @llvm.stacksave.p0()
100+
// OGCG: store ptr %[[STACK_SAVE]], ptr %[[SAVED_STACK_ADDR]], align 8
101+
// OGCG: %[[ARR_ADDR:.*]] = alloca i8, i64 %[[TMP_N]], align 16
102+
// OGCG: store i64 %[[TMP_N]], ptr %[[VLA_EXPR0]], align 8
103+
// OGCG: store i64 %[[TMP_N]], ptr %[[SIZE_ADDR]], align 8
104+
// OGCG: %[[TMP_SAVED_STACK:.*]] = load ptr, ptr %[[SAVED_STACK_ADDR]], align 8
105+
// OGCG: call void @llvm.stackrestore.p0(ptr %[[TMP_SAVED_STACK]])
106+
107+
void vla_expr_element_type_int() {
108+
unsigned long n = 10ul;
109+
int arr[n];
110+
unsigned long size = sizeof(arr);
111+
}
112+
113+
// CIR: %[[N_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["n", init]
114+
// CIR: %[[SAVED_STACK_ADDR:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
115+
// CIR: %[[CONST_10:.*]] = cir.const #cir.int<10> : !u64i
116+
// CIR: cir.store {{.*}} %[[CONST_10]], %[[N_ADDR]] : !u64i, !cir.ptr<!u64i>
117+
// CIR: %[[TMP_N:.*]] = cir.load {{.*}} %[[N_ADDR]] : !cir.ptr<!u64i>, !u64i
118+
// CIR: %[[STACK_SAVE:.*]] = cir.stacksave : !cir.ptr<!u8i>
119+
// CIR: cir.store {{.*}} %[[STACK_SAVE]], %[[SAVED_STACK_ADDR]] : !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>
120+
// CIR: %[[ARR_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[TMP_N]] : !u64i, ["arr"]
121+
// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["size", init]
122+
// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !u64i
123+
// CIR: %[[SIZE:.*]] = cir.binop(mul, %[[CONST_4]], %[[TMP_N]]) nuw : !u64i
124+
// CIR: cir.store {{.*}} %[[SIZE]], %[[SIZE_ADDR]] : !u64i, !cir.ptr<!u64i>
125+
// CIR: %[[TMP_SAVED_STACK:.*]] = cir.load {{.*}} %[[SAVED_STACK_ADDR]] : !cir.ptr<!cir.ptr<!u8i>>, !cir.ptr<!u8i>
126+
// CIR: cir.stackrestore %[[TMP_SAVED_STACK]] : !cir.ptr<!u8i>
127+
128+
// LLVM: %[[N_ADDR:.*]] = alloca i64, i64 1, align 8
129+
// LLVM: %[[SAVED_STACK_ADDR:.*]] = alloca ptr, i64 1, align 8
130+
// LLVM: store i64 10, ptr %[[N_ADDR]], align 8
131+
// LLVM: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
132+
// LLVM: %[[STACK_SAVE:.*]] = call ptr @llvm.stacksave.p0()
133+
// LLVM: store ptr %[[STACK_SAVE]], ptr %[[SAVED_STACK_ADDR]], align 8
134+
// LLVM: %[[ARR_ADDR:.*]] = alloca i32, i64 %[[TMP_N]], align 16
135+
// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8
136+
// LLVM: %[[SIZE:.*]] = mul nuw i64 4, %[[TMP_N]]
137+
// LLVM: store i64 %[[SIZE]], ptr %[[SIZE_ADDR]], align 8
138+
// LLVM: %[[TMP_SAVED_STACK:.*]] = load ptr, ptr %[[SAVED_STACK_ADDR]], align 8
139+
// LLVM: call void @llvm.stackrestore.p0(ptr %[[TMP_SAVED_STACK]])
140+
141+
// Note: VLA_EXPR0 below is emitted to capture debug info.
142+
143+
// OGCG: %[[N_ADDR:.*]] = alloca i64, align 8
144+
// OGCG: %[[SAVED_STACK_ADDR:.*]] = alloca ptr, align 8
145+
// OGCG: %[[VLA_EXPR0:.*]] = alloca i64, align 8
146+
// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, align 8
147+
// OGCG: store i64 10, ptr %[[N_ADDR]], align 8
148+
// OGCG: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
149+
// OGCG: %[[STACK_SAVE:.*]] = call ptr @llvm.stacksave.p0()
150+
// OGCG: store ptr %[[STACK_SAVE]], ptr %[[SAVED_STACK_ADDR]], align 8
151+
// OGCG: %[[ARR_ADDR:.*]] = alloca i32, i64 %[[TMP_N]], align 16
152+
// OGCG: store i64 %[[TMP_N]], ptr %[[VLA_EXPR0]], align 8
153+
// OGCG: %[[SIZE:.*]] = mul nuw i64 4, %[[TMP_N]]
154+
// OGCG: store i64 %[[SIZE]], ptr %[[SIZE_ADDR]], align 8
155+
// OGCG: %[[TMP_SAVED_STACK:.*]] = load ptr, ptr %[[SAVED_STACK_ADDR]], align 8
156+
// OGCG: call void @llvm.stackrestore.p0(ptr %[[TMP_SAVED_STACK]])

0 commit comments

Comments (0)