diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index 2970b369a85d0..61072f0883728 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -522,7 +522,8 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &funcInfo,
 
     assert(!cir::MissingFeatures::opCallPaddingArgs());
     mlir::Type argType = convertType(canQualArgType);
-    if (!mlir::isa<cir::RecordType>(argType)) {
+    if (!mlir::isa<cir::RecordType>(argType) &&
+        !mlir::isa<cir::ComplexType>(argType)) {
       mlir::Value v;
       if (arg.isAggregate())
         cgm.errorNYI(loc, "emitCall: aggregate call argument");
@@ -540,15 +541,16 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &funcInfo,
       cirCallArgs[argNo] = v;
     } else {
       Address src = Address::invalid();
-      if (!arg.isAggregate())
-        cgm.errorNYI(loc, "emitCall: non-aggregate call argument");
-      else
+      if (!arg.isAggregate()) {
+        src = createMemTemp(arg.ty, loc, "coerce");
+        arg.copyInto(*this, src, loc);
+      } else {
        src = arg.hasLValue() ? arg.getKnownLValue().getAddress()
                              : arg.getKnownRValue().getAggregateAddress();
+      }
 
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
-      auto argRecordTy = cast<cir::RecordType>(argType);
      mlir::Type srcTy = src.getElementType();
      // FIXME(cir): get proper location for each argument.
      mlir::Location argLoc = loc;
@@ -564,7 +566,7 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &funcInfo,
      // uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
      // if (SrcSize < DstSize) {
      assert(!cir::MissingFeatures::dataLayoutTypeAllocSize());
-      if (srcTy != argRecordTy) {
+      if (srcTy != argType) {
        cgm.errorNYI(loc, "emitCall: source type does not match argument type");
      } else {
        // FIXME(cir): this currently only runs when the types are exactly the
@@ -676,6 +678,18 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &funcInfo,
   llvm_unreachable("Invalid evaluation kind");
 }
 
+void CallArg::copyInto(CIRGenFunction &cgf, Address addr,
+                       mlir::Location loc) const {
+  LValue dst = cgf.makeAddrLValue(addr, ty);
+  if (!hasLV && rv.isScalar())
+    cgf.cgm.errorNYI(loc, "copyInto scalar value");
+  else if (!hasLV && rv.isComplex())
+    cgf.emitStoreOfComplex(loc, rv.getComplexValue(), dst, /*isInit=*/true);
+  else
+    cgf.cgm.errorNYI(loc, "copyInto hasLV");
+  isUsed = true;
+}
+
 void CIRGenFunction::emitCallArg(CallArgList &args, const clang::Expr *e,
                                  clang::QualType argType) {
   assert(argType->isReferenceType() == e->isGLValue() &&
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h
index 52d541f2b09b5..55b3d9765c5c5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -224,6 +224,8 @@ struct CallArg {
   }
 
   bool isAggregate() const { return hasLV || rv.isAggregate(); }
+
+  void copyInto(CIRGenFunction &cgf, Address addr, mlir::Location loc) const;
 };
 
 class CallArgList : public llvm::SmallVector<CallArg, 8> {
diff --git a/clang/test/CIR/CodeGen/complex.cpp b/clang/test/CIR/CodeGen/complex.cpp
index ae69b2486efd0..a9ab87e1a1234 100644
--- a/clang/test/CIR/CodeGen/complex.cpp
+++ b/clang/test/CIR/CodeGen/complex.cpp
@@ -1311,3 +1311,49 @@ void real_on_scalar_from_imag_with_type_promotion() {
 // OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
 // OGCG: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half
 // OGCG: store half %[[A_IMAG_F16]], ptr %[[B_ADDR]], align 2
+
+void complex_type_parameter(float _Complex a) {}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a", init]
+// CIR: cir.store %{{.*}}, %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// TODO(CIR): The difference between the CIR-generated LLVM and the OGCG output is due to the
+// lack of calling convention lowering. The test will be updated once that is implemented.
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: store { float, float } %{{.*}}, ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: store <2 x float> %a.coerce, ptr %[[A_ADDR]], align 4
+
+void complex_type_argument() {
+  float _Complex a;
+  complex_type_parameter(a);
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[ARG_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["coerce"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[TMP_A]], %[[ARG_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+// CIR: %[[TMP_ARG:.*]] = cir.load{{.*}} %[[ARG_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: cir.call @_Z22complex_type_parameterCf(%[[TMP_ARG]]) : (!cir.complex<!cir.float>) -> ()
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[ARG_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: store { float, float } %[[TMP_A]], ptr %[[ARG_ADDR]], align 4
+// LLVM: %[[TMP_ARG:.*]] = load { float, float }, ptr %[[ARG_ADDR]], align 4
+// LLVM: call void @_Z22complex_type_parameterCf({ float, float } %[[TMP_ARG]])
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[ARG_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[ARG_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[ARG_ADDR]], i32 0, i32 0
+// OGCG: %[[ARG_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[ARG_ADDR]], i32 0, i32 1
+// OGCG: store float %[[A_REAL]], ptr %[[ARG_REAL_PTR]], align 4
+// OGCG: store float %[[A_IMAG]], ptr %[[ARG_IMAG_PTR]], align 4
+// OGCG: %[[TMP_ARG:.*]] = load <2 x float>, ptr %[[ARG_ADDR]], align 4
+// OGCG: call void @_Z22complex_type_parameterCf(<2 x float> noundef %[[TMP_ARG]])