diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 5ccb431e626ae..4fb178df0e508 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1630,7 +1630,7 @@ RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot,
                                    bool ignoreResult) {
   switch (CIRGenFunction::getEvaluationKind(e->getType())) {
   case cir::TEK_Scalar:
-    return RValue::get(emitScalarExpr(e));
+    return RValue::get(emitScalarExpr(e, ignoreResult));
   case cir::TEK_Complex:
     return RValue::getComplex(emitComplexExpr(e));
   case cir::TEK_Aggregate: {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 119314fe27dce..5eba5ba6c3df1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -78,11 +78,15 @@ struct BinOpInfo {
 class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   CIRGenFunction &cgf;
   CIRGenBuilderTy &builder;
+  // Unlike classic codegen, we set this to false or read it with
+  // std::exchange, rather than calling TestAndClearIgnoreResultAssign, to
+  // make it explicit where the value is used.
   bool ignoreResultAssign;
 
 public:
-  ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder)
-      : cgf(cgf), builder(builder) {}
+  ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder,
+                    bool ignoreResultAssign = false)
+      : cgf(cgf), builder(builder), ignoreResultAssign(ignoreResultAssign) {}
 
   //===--------------------------------------------------------------------===//
   //                               Utilities
@@ -221,6 +225,8 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   }
 
   mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
+    ignoreResultAssign = false;
+
     if (e->getBase()->getType()->isVectorType()) {
       assert(!cir::MissingFeatures::scalableVectors());
 
@@ -839,6 +845,7 @@
 
   BinOpInfo emitBinOps(const BinaryOperator *e,
                        QualType promotionType = QualType()) {
+    ignoreResultAssign = false;
     BinOpInfo result;
     result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
     result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
@@ -924,6 +931,7 @@
 #undef HANDLEBINOP
 
   mlir::Value emitCmp(const BinaryOperator *e) {
+    ignoreResultAssign = false;
     const mlir::Location loc = cgf.getLoc(e->getExprLoc());
     mlir::Value result;
     QualType lhsTy = e->getLHS()->getType();
@@ -1406,11 +1414,13 @@ CIRGenFunction::emitCompoundAssignmentLValue(const CompoundAssignOperator *e) {
 }
 
 /// Emit the computation of the specified expression of scalar type.
-mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e) {
+mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e,
+                                           bool ignoreResultAssign) {
   assert(e && hasScalarEvaluationKind(e->getType()) &&
          "Invalid scalar expression to emit");
 
-  return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
+  return ScalarExprEmitter(*this, builder, ignoreResultAssign)
+      .Visit(const_cast<Expr *>(e));
 }
 
 mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e,
@@ -2054,6 +2064,11 @@ mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
 mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
   const unsigned numInitElements = e->getNumInits();
 
+  [[maybe_unused]] const bool ignore = std::exchange(ignoreResultAssign, false);
+  assert((ignore == false ||
+          (numInitElements == 0 && e->getType()->isVoidType())) &&
+         "init list ignored");
+
   if (e->hadArrayRangeDesignator()) {
     cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
     return {};
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index e5cecaa573a6e..dece642eb13b6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -1501,7 +1501,8 @@ class CIRGenFunction : public CIRGenTypeCache {
                               llvm::ArrayRef<mlir::Value> args = {});
 
   /// Emit the computation of the specified expression of scalar type.
-  mlir::Value emitScalarExpr(const clang::Expr *e);
+  mlir::Value emitScalarExpr(const clang::Expr *e,
+                             bool ignoreResultAssign = false);
 
   mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
                                       cir::UnaryOpKind kind, bool isPre);
diff --git a/clang/test/CIR/CodeGen/binassign.c b/clang/test/CIR/CodeGen/binassign.c
index 44c54b4a2969a..4520063c56ee6 100644
--- a/clang/test/CIR/CodeGen/binassign.c
+++ b/clang/test/CIR/CodeGen/binassign.c
@@ -100,3 +100,107 @@ void binary_assign_struct() {
 // OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[LS_PTR]], ptr align 4 @gs, i64 8, i1 false)
 // OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[LSV_PTR]], ptr align 4 @gsv, i64 8, i1 true)
 // OGCG: ret void
+
+int ignore_result_assign() {
+  int arr[10];
+  int i, j;
+  j = i = 123, 0;
+  j = arr[i = 5];
+  int *p, *q = 0;
+  if(p = q)
+    return 1;
+  return 0;
+}
+
+// CIR-LABEL: cir.func{{.*}} @ignore_result_assign() -> !s32i
+// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
+// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!s32i x 10>, !cir.ptr<!cir.array<!s32i x 10>>, ["arr"]
+// CIR: %[[I:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["i"]
+// CIR: %[[J:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["j"]
+// CIR: %[[P:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["p"]
+// CIR: %[[Q:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["q", init]
+// CIR: %[[VAL_123:.*]] = cir.const #cir.int<123> : !s32i
+// CIR: cir.store{{.*}} %[[VAL_123]], %[[I]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %[[VAL_123]], %[[J]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[VAL_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: %[[VAL_5:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: cir.store{{.*}} %[[VAL_5]], %[[I]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[ARR_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr<!cir.array<!s32i x 10>> -> !cir.ptr<!s32i>
+// CIR: %[[ARR_ELEM:.*]] = cir.ptr_stride %[[ARR_DECAY]], %[[VAL_5]] : (!cir.ptr<!s32i>, !s32i) -> !cir.ptr<!s32i>
+// CIR: %[[ARR_LOAD:.*]] = cir.load{{.*}} %[[ARR_ELEM]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store{{.*}} %[[ARR_LOAD]], %[[J]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[NULL:.*]] = cir.const #cir.ptr<null> : !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %[[NULL]], %[[Q]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR: cir.scope {
+// CIR: %[[Q_VAL:.*]] = cir.load{{.*}} %[[Q]] : !cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %[[Q_VAL]], %[[P]] : !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>
+// CIR: %[[COND:.*]] = cir.cast ptr_to_bool %[[Q_VAL]] : !cir.ptr<!s32i> -> !cir.bool
+// CIR: cir.if %[[COND]] {
+// CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+// CIR: cir.store %[[ONE]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: %{{.*}} = cir.load %[[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return
+// CIR: }
+// CIR: }
+// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store %[[ZERO]], %[[RETVAL]] : !s32i, !cir.ptr<!s32i>
+// CIR: %{{.*}} = cir.load %[[RETVAL]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.return
+
+// LLVM-LABEL: define {{.*}}i32 @ignore_result_assign()
+// LLVM: %[[RETVAL_PTR:.*]] = alloca i32
+// LLVM: %[[ARR_PTR:.*]] = alloca [10 x i32]
+// LLVM: %[[I_PTR:.*]] = alloca i32
+// LLVM: %[[J_PTR:.*]] = alloca i32
+// LLVM: %[[P_PTR:.*]] = alloca ptr
+// LLVM: %[[Q_PTR:.*]] = alloca ptr
+// LLVM: store i32 123, ptr %[[I_PTR]]
+// LLVM: store i32 123, ptr %[[J_PTR]]
+// LLVM: store i32 5, ptr %[[I_PTR]]
+// LLVM: %[[GEP1:.*]] = getelementptr i32, ptr %[[ARR_PTR]], i32 0
+// LLVM: %[[GEP2:.*]] = getelementptr i32, ptr %[[GEP1]], i64 5
+// LLVM: %[[ARR_VAL:.*]] = load i32, ptr %[[GEP2]]
+// LLVM: store i32 %[[ARR_VAL]], ptr %[[J_PTR]]
+// LLVM: store ptr null, ptr %[[Q_PTR]]
+// LLVM: br label
+// LLVM: %[[Q_VAL:.*]] = load ptr, ptr %[[Q_PTR]]
+// LLVM: store ptr %[[Q_VAL]], ptr %[[P_PTR]]
+// LLVM: %[[CMP:.*]] = icmp ne ptr %[[Q_VAL]], null
+// LLVM: br i1 %[[CMP]], label %[[THEN:.*]], label %[[ELSE:.*]]
+// LLVM: [[THEN]]:
+// LLVM: store i32 1, ptr %[[RETVAL_PTR]]
+// LLVM: %{{.*}} = load i32, ptr %[[RETVAL_PTR]]
+// LLVM: ret i32
+// LLVM: [[ELSE]]:
+// LLVM: br label
+// LLVM: store i32 0, ptr %[[RETVAL_PTR]]
+// LLVM: %{{.*}} = load i32, ptr %[[RETVAL_PTR]]
+// LLVM: ret i32
+
+// OGCG-LABEL: define {{.*}}i32 @ignore_result_assign()
+// OGCG: %[[RETVAL:.*]] = alloca i32
+// OGCG: %[[ARR:.*]] = alloca [10 x i32]
+// OGCG: %[[I:.*]] = alloca i32
+// OGCG: %[[J:.*]] = alloca i32
+// OGCG: %[[P:.*]] = alloca ptr
+// OGCG: %[[Q:.*]] = alloca ptr
+// OGCG: store i32 123, ptr %[[I]]
+// OGCG: store i32 123, ptr %[[J]]
+// OGCG: store i32 5, ptr %[[I]]
+// OGCG: %[[ARRAYIDX:.*]] = getelementptr inbounds [10 x i32], ptr %[[ARR]], i64 0, i64 5
+// OGCG: %[[ARR_VAL:.*]] = load i32, ptr %[[ARRAYIDX]]
+// OGCG: store i32 %[[ARR_VAL]], ptr %[[J]]
+// OGCG: store ptr null, ptr %[[Q]]
+// OGCG: %[[Q_VAL:.*]] = load ptr, ptr %[[Q]]
+// OGCG: store ptr %[[Q_VAL]], ptr %[[P]]
+// OGCG: %[[TOBOOL:.*]] = icmp ne ptr %[[Q_VAL]], null
+// OGCG: br i1 %[[TOBOOL]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
+// OGCG: [[IF_THEN]]:
+// OGCG: store i32 1, ptr %[[RETVAL]]
+// OGCG: br label %[[RETURN:.*]]
+// OGCG: [[IF_END]]:
+// OGCG: store i32 0, ptr %[[RETVAL]]
+// OGCG: br label %[[RETURN]]
+// OGCG: [[RETURN]]:
+// OGCG: %{{.*}} = load i32, ptr %[[RETVAL]]
+// OGCG: ret i32
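
Note on how the flag is consumed (not part of the patch itself): the ignoreResultAssign member already existed; this change only threads the caller's ignoreResult into the emitter and clears the flag in visitors whose result can never be ignored. A minimal sketch of a consumer such as VisitBinAssign, following the std::exchange convention described in the new comment, might look like the code below. The cgf.emit* helpers and the return-value handling are assumptions modeled on classic codegen, not the actual CIR implementation.

mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *e) {
  // Read the flag and reset it so that emission of the subexpressions below
  // starts from a clean state.
  const bool ignore = std::exchange(ignoreResultAssign, false);

  // Simple scalar assignment: emit the RHS, then store it through the LHS
  // lvalue. emitLValue/emitStoreThroughLValue are assumed to mirror their
  // classic-codegen counterparts; a real visitor also handles bit-fields,
  // volatility, etc.
  mlir::Value rhs = Visit(e->getRHS());
  LValue lhs = cgf.emitLValue(e->getLHS());
  cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);

  // When the result of the assignment expression is unused (e.g. `i = 123;`
  // as a statement), produce no value instead of materializing the result of
  // the assignment; this is what removes the redundant load in the test above.
  if (ignore)
    return {};

  // Otherwise produce the value of the assignment expression. Returning the
  // stored RHS is a simplification; an implementation may instead reload the
  // LHS (e.g. for volatile assignments).
  return rhs;
}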