[CIR] Upstream support for builtin_constant_p #170354
Conversation
This upstreams the handler for the BI__builtin_constant_p function.
This replaces #166832, which wasn't making progress.
@llvm/pr-subscribers-clang @llvm/pr-subscribers-clangir

Author: Andy Kaylor (andykaylor)

Changes

This upstreams the handler for the BI__builtin_constant_p function.

Full diff: https://github.com/llvm/llvm-project/pull/170354.diff

4 Files Affected:
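For readers unfamiliar with the builtin, here is a minimal illustration (not part of the patch; the function and variable names are made up) of the three cases the new handler distinguishes:

```
// Illustrative only; not part of this patch.
extern int n_global;

int always_one(void) {
  // A manifest compile-time constant: the front end folds the call to 1.
  return __builtin_constant_p(42);
}

int maybe_constant(int n) {
  // Not known to be constant at translation time; with this patch the front
  // end emits cir.is_constant and defers the answer to later optimizations
  // (e.g. after inlining with a constant argument).
  return __builtin_constant_p(n);
}

int never_one(void) {
  // A global's address is a link-time constant, not a manifest constant, and
  // only numeric arguments are recognized after inlining, so this folds to 0.
  return __builtin_constant_p(&n_global);
}
```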
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 4b64fc56c57ad..1d41129f5e0c2 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1173,6 +1173,34 @@ def CIR_SwitchOp : CIR_Op<"switch", [
let hasLLVMLowering = false;
}
+//===----------------------------------------------------------------------===//
+// IsConstantOp
+//===----------------------------------------------------------------------===//
+
+def CIR_IsConstantOp : CIR_Op<"is_constant", [Pure]> {
+ let description = [{
+ Returns `true` if the argument is known to be a manifest compile-time
+ constant; otherwise returns `false`. If the argument is a constant expression
+ which refers to a global (the address of which _is_ a constant, but not
+ manifest during the compile), then the intrinsic evaluates to `false`.
+
+ This is used to represent `__builtin_constant_p` in cases where the argument
+ isn't known to be constant during initial translation of the source code but
+ might be proven to be constant after later optimizations.
+
+ Example:
+ ```
+ %1 = cir.is_constant %2 : !s32i -> !cir.bool
+ ```
+ }];
+ let arguments = (ins CIR_AnyType:$val);
+ let results = (outs CIR_BoolType:$result);
+
+ let assemblyFormat = [{
+ $val `:` qualified(type($val)) `->` qualified(type($result)) attr-dict
+ }];
+}
+
//===----------------------------------------------------------------------===//
// SwitchFlatOp
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index e14b5f8aac337..65949ef7233f5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -542,6 +542,45 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
returnValue);
}
+
+ case Builtin::BI__builtin_constant_p: {
+ mlir::Type resultType = convertType(e->getType());
+
+ const Expr *arg = e->getArg(0);
+ QualType argType = arg->getType();
+ // FIXME: The allowance for Obj-C pointers and block pointers is historical
+ // and likely a mistake.
+ if (!argType->isIntegralOrEnumerationType() && !argType->isFloatingType() &&
+ !argType->isObjCObjectPointerType() && !argType->isBlockPointerType()) {
+ // Per the GCC documentation, only numeric constants are recognized after
+ // inlining.
+ return RValue::get(
+ builder.getConstInt(getLoc(e->getSourceRange()),
+ mlir::cast<cir::IntType>(resultType), 0));
+ }
+
+ if (arg->HasSideEffects(getContext())) {
+ // The argument is unevaluated, so be conservative if it might have
+ // side-effects.
+ return RValue::get(
+ builder.getConstInt(getLoc(e->getSourceRange()),
+ mlir::cast<cir::IntType>(resultType), 0));
+ }
+
+ mlir::Value argValue = emitScalarExpr(arg);
+ if (argType->isObjCObjectPointerType()) {
+ cgm.errorNYI(e->getSourceRange(),
+ "__builtin_constant_p: Obj-C object pointer");
+ return {};
+ }
+ argValue = builder.createBitcast(argValue, convertType(argType));
+
+ mlir::Value result = cir::IsConstantOp::create(
+ builder, getLoc(e->getSourceRange()), argValue);
+ if (result.getType() != resultType)
+ result = builder.createBoolToInt(result, resultType);
+ return RValue::get(result);
+ }
case Builtin::BI__builtin_dynamic_object_size:
case Builtin::BI__builtin_object_size: {
unsigned type =
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 8e9780754f68f..40e14474890dc 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -3979,6 +3979,13 @@ mlir::LogicalResult CIRToLLVMGetBitfieldOpLowering::matchAndRewrite(
return mlir::success();
}
+mlir::LogicalResult CIRToLLVMIsConstantOpLowering::matchAndRewrite(
+ cir::IsConstantOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::IsConstantOp>(op, adaptor.getVal());
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMInlineAsmOpLowering::matchAndRewrite(
cir::InlineAsmOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
diff --git a/clang/test/CIR/CodeGenBuiltins/builtin-constant-p.c b/clang/test/CIR/CodeGenBuiltins/builtin-constant-p.c
new file mode 100644
index 0000000000000..d684659216cba
--- /dev/null
+++ b/clang/test/CIR/CodeGenBuiltins/builtin-constant-p.c
@@ -0,0 +1,281 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+int a = 42;
+
+/* --- Compound literals */
+
+struct foo { int x, y; };
+
+int y;
+struct foo f = (struct foo){ __builtin_constant_p(y), 42 };
+
+// CIR: cir.global external @f = #cir.const_record<{#cir.int<0> : !s32i, #cir.int<42> : !s32i}> : !rec_foo
+// LLVM: @f = global %struct.foo { i32 0, i32 42 }
+// OGCG: @f = global %struct.foo { i32 0, i32 42 }
+
+struct foo test0(int expr) {
+ struct foo f = (struct foo){ __builtin_constant_p(expr), 42 };
+ return f;
+}
+
+// CIR: cir.func {{.*}} @test0(%[[ARG0:.*]]: !s32i {{.*}}) -> !rec_foo
+// CIR: %[[EXPR_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["expr", init]
+// CIR: cir.store %[[ARG0]], %[[EXPR_ADDR]]
+// CIR: %[[EXPR:.*]] = cir.load{{.*}} %[[EXPR_ADDR]]
+// CIR: %[[IS_CONSTANT:.*]] = cir.is_constant %[[EXPR]] : !s32i -> !cir.bool
+
+// LLVM: define{{.*}} %struct.foo @test0(i32 %[[ARG0:.*]])
+// LLVM: %[[EXPR_ADDR:.*]] = alloca i32
+// LLVM: store i32 %[[ARG0]], ptr %[[EXPR_ADDR]]
+// LLVM: %[[EXPR:.*]] = load i32, ptr %[[EXPR_ADDR]]
+// LLVM: %[[IS_CONSTANT:.*]] = call i1 @llvm.is.constant.i32(i32 %[[EXPR]])
+
+// OGCG: define{{.*}} i64 @test0(i32 {{.*}} %[[ARG0:.*]])
+// OGCG: %[[EXPR_ADDR:.*]] = alloca i32
+// OGCG: store i32 %[[ARG0]], ptr %[[EXPR_ADDR]]
+// OGCG: %[[EXPR:.*]] = load i32, ptr %[[EXPR_ADDR]]
+// OGCG: %[[IS_CONSTANT:.*]] = call i1 @llvm.is.constant.i32(i32 %[[EXPR]])
+
+/* --- Pointer types */
+
+int test1(void) {
+ return __builtin_constant_p(&a - 13);
+}
+
+// CIR: cir.func {{.*}} @test1() -> !s32i
+// CIR: %[[TMP1:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store %[[ZERO]], %[[TMP1]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[TMP2]] : !s32i
+
+// LLVM: define{{.*}} i32 @test1()
+// LLVM: %[[TMP1:.*]] = alloca i32
+// LLVM: store i32 0, ptr %[[TMP1]]
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[TMP1]]
+// LLVM: ret i32 %[[TMP2]]
+
+// OGCG: define{{.*}} i32 @test1()
+// OGCG: ret i32 0
+
+/* --- Aggregate types */
+
+int b[] = {1, 2, 3};
+
+int test2(void) {
+ return __builtin_constant_p(b);
+}
+
+// CIR: cir.func {{.*}} @test2() -> !s32i
+// CIR: %[[TMP1:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store %[[ZERO]], %[[TMP1]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[TMP2]] : !s32i
+
+// LLVM: define{{.*}} i32 @test2()
+// LLVM: %[[TMP1:.*]] = alloca i32
+// LLVM: store i32 0, ptr %[[TMP1]]
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[TMP1]]
+// LLVM: ret i32 %[[TMP2]]
+
+// OGCG: define{{.*}} i32 @test2()
+// OGCG: ret i32 0
+
+const char test3_c[] = {1, 2, 3, 0};
+
+int test3(void) {
+ return __builtin_constant_p(test3_c);
+}
+
+// CIR: cir.func {{.*}} @test3() -> !s32i
+// CIR: %[[TMP1:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store %[[ZERO]], %[[TMP1]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[TMP2]] : !s32i
+
+// LLVM: define{{.*}} i32 @test3()
+// LLVM: %[[TMP1:.*]] = alloca i32
+// LLVM: store i32 0, ptr %[[TMP1]]
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[TMP1]]
+// LLVM: ret i32 %[[TMP2]]
+
+// OGCG: define{{.*}} i32 @test3()
+// OGCG: ret i32 0
+
+inline char test4_i(const char *x) {
+ return x[1];
+}
+
+int test4(void) {
+ return __builtin_constant_p(test4_i(test3_c));
+}
+
+// CIR: cir.func {{.*}} @test4() -> !s32i
+// CIR: %[[TMP1:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store %[[ZERO]], %[[TMP1]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[TMP2]] : !s32i
+
+// LLVM: define{{.*}} i32 @test4()
+// LLVM: %[[TMP1:.*]] = alloca i32
+// LLVM: store i32 0, ptr %[[TMP1]]
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[TMP1]]
+// LLVM: ret i32 %[[TMP2]]
+
+// OGCG: define{{.*}} i32 @test4()
+// OGCG: ret i32 0
+
+/* --- Constant global variables */
+
+const int c = 42;
+
+int test5(void) {
+ return __builtin_constant_p(c);
+}
+
+// CIR: cir.func {{.*}} @test5() -> !s32i
+// CIR: %[[TMP1:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store %[[ONE]], %[[TMP1]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[TMP2]] : !s32i
+
+// LLVM: define{{.*}} i32 @test5()
+// LLVM: %[[TMP1:.*]] = alloca i32
+// LLVM: store i32 1, ptr %[[TMP1]]
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[TMP1]]
+// LLVM: ret i32 %[[TMP2]]
+
+// OGCG: define{{.*}} i32 @test5()
+// OGCG: ret i32 1
+
+/* --- Array types */
+
+int arr[] = { 1, 2, 3 };
+
+int test6(void) {
+ return __builtin_constant_p(arr[2]);
+}
+
+// CIR: cir.func {{.*}} @test6() -> !s32i
+// CIR: %[[TWO:.*]] = cir.const #cir.int<2> : !s32i
+// CIR: %[[ARR:.*]] = cir.get_global @arr : !cir.ptr<!cir.array<!s32i x 3>>
+// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!s32i x 3>> -> !cir.ptr<!s32i>
+// CIR: %[[ELE_PTR:.*]] = cir.ptr_stride %[[ARR_PTR]], %[[TWO]] : (!cir.ptr<!s32i>, !s32i) -> !cir.ptr<!s32i>
+// CIR: %[[ELE:.*]] = cir.load{{.*}} %[[ELE_PTR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[IS_CONSTANT:.*]] = cir.is_constant %[[ELE]] : !s32i -> !cir.bool
+
+// LLVM: define {{.*}} i32 @test6()
+// LLVM: %[[TMP1:.*]] = load i32, ptr getelementptr inbounds nuw (i8, ptr @arr, i64 8)
+// LLVM: %[[TMP2:.*]] = call i1 @llvm.is.constant.i32(i32 %[[TMP1]])
+
+// OGCG: define {{.*}} i32 @test6()
+// OGCG: %[[TMP1:.*]] = load i32, ptr getelementptr inbounds ([3 x i32], ptr @arr, i64 0, i64 2)
+// OGCG: %[[TMP2:.*]] = call i1 @llvm.is.constant.i32(i32 %[[TMP1]])
+
+const int c_arr[] = { 1, 2, 3 };
+
+int test7(void) {
+ return __builtin_constant_p(c_arr[2]);
+}
+
+// CIR: cir.func {{.*}} @test7() -> !s32i
+// CIR: %[[TMP1:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store %[[ONE]], %[[TMP1]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[TMP2]] : !s32i
+
+// LLVM: define{{.*}} i32 @test7()
+// LLVM: %[[TMP1:.*]] = alloca i32
+// LLVM: store i32 1, ptr %[[TMP1]]
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[TMP1]]
+// LLVM: ret i32 %[[TMP2]]
+
+// OGCG: define{{.*}} i32 @test7()
+// OGCG: ret i32 1
+
+int test8(void) {
+ return __builtin_constant_p(c_arr);
+}
+
+// CIR: cir.func {{.*}} @test8() -> !s32i
+// CIR: %[[TMP1:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store %[[ZERO]], %[[TMP1]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[TMP2]] : !s32i
+
+// LLVM: define{{.*}} i32 @test8()
+// LLVM: %[[TMP1:.*]] = alloca i32
+// LLVM: store i32 0, ptr %[[TMP1]]
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[TMP1]]
+// LLVM: ret i32 %[[TMP2]]
+
+// OGCG: define{{.*}} i32 @test8()
+// OGCG: ret i32 0
+
+/* --- Function pointers */
+
+int test9(void) {
+ return __builtin_constant_p(&test9);
+}
+
+// CIR: cir.func {{.*}} @test9() -> !s32i
+// CIR: %[[TMP1:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store %[[ZERO]], %[[TMP1]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[TMP2]] : !s32i
+
+// LLVM: define{{.*}} i32 @test9()
+// LLVM: %[[TMP1:.*]] = alloca i32
+// LLVM: store i32 0, ptr %[[TMP1]]
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[TMP1]]
+// LLVM: ret i32 %[[TMP2]]
+
+// OGCG: define{{.*}} i32 @test9()
+// OGCG: ret i32 0
+
+int test10(void) {
+ return __builtin_constant_p(&test10 != 0);
+}
+
+// CIR: cir.func {{.*}} @test10() -> !s32i
+// CIR: %[[TMP1:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store %[[ONE]], %[[TMP1]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[TMP2:.*]] = cir.load %[[TMP1]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return %[[TMP2]] : !s32i
+
+// LLVM: define{{.*}} i32 @test10()
+// LLVM: %[[TMP1:.*]] = alloca i32
+// LLVM: store i32 1, ptr %[[TMP1]]
+// LLVM: %[[TMP2:.*]] = load i32, ptr %[[TMP1]]
+// LLVM: ret i32 %[[TMP2]]
+
+// OGCG: define{{.*}} i32 @test10()
+// OGCG: ret i32 1
+
+int test11_f(void);
+void test11(void) {
+ int a, b;
+ (void)__builtin_constant_p((a = b, test11_f()));
+}
+
+// CIR: cir.func {{.*}} @test11()
+// CIR-NOT: call {{.*}}test11_f
+
+// LLVM: define{{.*}} void @test11()
+// LLVM-NOT: call {{.*}}test11_f
+
+// OGCG: define{{.*}} void @test11()
+// OGCG-NOT: call {{.*}}test11_f
xlauko left a comment:
lgtm % nits
On `def CIR_IsConstantOp : CIR_Op<"is_constant", [Pure]> {` in CIROps.td:
please add summary
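Purely as a sketch of what the requested field might look like (hypothetical wording; the final text is the author's call):

```
def CIR_IsConstantOp : CIR_Op<"is_constant", [Pure]> {
  // Hypothetical summary wording, added per the review request.
  let summary = "Return true if the operand is a manifest compile-time constant";

  // description, arguments, results, and assemblyFormat as in the patch above.
}
```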
On `if (result.getType() != resultType)` in CIRGenBuiltin.cpp:
Isn't this always true as result is enforced to be a cir::BoolType and resultType is cir::IntType?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think you're right. This was following the behavior of classic codegen, which does the same check on the result type of the llvm.is.constant intrinsic; that intrinsic always returns i1, so the check there is also always true.
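A minimal sketch of the simplification this implies (assuming, as the handler already does via the earlier cast, that resultType is always a cir::IntType for this builtin, so the conversion can be emitted unconditionally):

```
// cir.is_constant always produces !cir.bool, and __builtin_constant_p has an
// integer result type, so the type comparison is redundant; convert directly.
mlir::Value result = cir::IsConstantOp::create(
    builder, getLoc(e->getSourceRange()), argValue);
result = builder.createBoolToInt(result, resultType);
return RValue::get(result);
```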