
Commit a871207

[CIR] Complex Unary plus and minus with promoted type (llvm#155486)
This change adds support for complex unary plus and minus expressions with a promoted type. Issue: llvm#141365
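
For reference, the source construct this enables (mirroring the new foo9/foo10 tests below) is unary + and - applied to a _Float16 _Complex operand: the operation is evaluated in the promoted type (float _Complex) and the result is truncated back to _Float16 _Complex.

void example() {
  _Float16 _Complex a;
  _Float16 _Complex b = +a; // computed in float _Complex, truncated back
  _Float16 _Complex c = -a; // likewise for unary minus
}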
Parent: 0499eb8

File tree: 2 files changed (+139, -8 lines)

clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp

Lines changed: 4 additions & 8 deletions
@@ -327,10 +327,8 @@ mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
 mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *e) {
   QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
   mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Plus, promotionTy);
-  if (!promotionTy.isNull()) {
-    cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryPlus emitUnPromotedValue");
-    return {};
-  }
+  if (!promotionTy.isNull())
+    return cgf.emitUnPromotedValue(result, e->getSubExpr()->getType());
   return result;
 }

@@ -352,10 +350,8 @@ mlir::Value ComplexExprEmitter::VisitPlusMinus(const UnaryOperator *e,
 mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e) {
   QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
   mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Minus, promotionTy);
-  if (!promotionTy.isNull()) {
-    cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryMinus emitUnPromotedValue");
-    return {};
-  }
+  if (!promotionTy.isNull())
+    return cgf.emitUnPromotedValue(result, e->getSubExpr()->getType());
   return result;
 }
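The flow the emitter now follows, written out as a plain C++ sketch (the HalfComplex type and negatePromoted name are hypothetical illustrations, not part of the patch; the actual lowering uses the CIR casts and unary ops shown in the tests below):

// Hypothetical sketch of the promote / compute / unpromote flow for -a.
struct HalfComplex { _Float16 re, im; }; // stand-in for _Float16 _Complex

HalfComplex negatePromoted(HalfComplex a) {
  float re = (float)a.re;              // promote: fpext half -> float
  float im = (float)a.im;
  re = -re;                            // apply the unary op in the promoted type (fneg float)
  im = -im;
  return {(_Float16)re, (_Float16)im}; // unpromote: fptrunc float -> half
}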

clang/test/CIR/CodeGen/complex-unary.cpp

Lines changed: 135 additions & 0 deletions
@@ -370,3 +370,138 @@ void foo8() {
 // OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: store float %[[A_REAL_MINUS]], ptr %[[B_REAL_PTR]], align 4
 // OGCG: store float %[[A_IMAG_MINUS]], ptr %[[B_IMAG_PTR]], align 4
+
+void foo9() {
+  _Float16 _Complex a;
+  _Float16 _Complex b = +a;
+}
+
+
+// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast(float_complex, %[[TMP_A]] : !cir.complex<!cir.f16>), !cir.complex<!cir.float>
+// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(plus, %[[A_COMPLEX_F32]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast(float_complex, %[[RESULT]] : !cir.complex<!cir.float>), !cir.complex<!cir.f16>
+// CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.unary(plus, %[[A_REAL_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.unary(plus, %[[A_IMAG_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
+// CIR-AFTER: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[B_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2
+// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1
+// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
+// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
+// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
+// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
+// LLVM: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half
+// LLVM: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half
+// LLVM: %[[TMP_RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } {{.*}}, half %[[A_REAL_F16]], 0
+// LLVM: %[[RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } %[[TMP_RESULT_COMPLEX_F16]], half %[[A_IMAG_F16]], 1
+// LLVM: store { half, half } %[[RESULT_COMPLEX_F16]], ptr %[[B_ADDR]], align 2
+
+// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[B_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load half, ptr %a.realp, align 2
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load half, ptr %a.imagp, align 2
+// OGCG: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// OGCG: %[[RESULT_REAL:.*]] = fptrunc float %[[A_REAL_F32]] to half
+// OGCG: %[[RESULT_IMAG:.*]] = fptrunc float %[[A_IMAG_F32]] to half
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store half %[[RESULT_REAL]], ptr %[[B_REAL_PTR]], align 2
+// OGCG: store half %[[RESULT_IMAG]], ptr %[[B_IMAG_PTR]], align 2
+
+void foo10() {
+  _Float16 _Complex a;
+  _Float16 _Complex b = -a;
+}
+
+// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast(float_complex, %[[TMP_A]] : !cir.complex<!cir.f16>), !cir.complex<!cir.float>
+// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(minus, %[[A_COMPLEX_F32]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast(float_complex, %[[RESULT]] : !cir.complex<!cir.float>), !cir.complex<!cir.f16>
+// CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.unary(minus, %[[A_REAL_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.unary(minus, %[[A_IMAG_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
+// CIR-AFTER: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[B_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2
+// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1
+// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
+// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
+// LLVM: %[[RESULT_REAL_F32:.*]] = fneg float %[[A_REAL_F32]]
+// LLVM: %[[RESULT_IMAG_F32:.*]] = fneg float %[[A_IMAG_F32]]
+// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL_F32]], 0
+// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[RESULT_IMAG_F32]], 1
+// LLVM: %[[A_REAL_F16:.*]] = fptrunc float %[[RESULT_REAL_F32]] to half
+// LLVM: %[[A_IMAG_F16:.*]] = fptrunc float %[[RESULT_IMAG_F32]] to half
+// LLVM: %[[TMP_RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } {{.*}}, half %[[A_REAL_F16]], 0
+// LLVM: %[[RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } %[[TMP_RESULT_COMPLEX_F16]], half %[[A_IMAG_F16]], 1
+// LLVM: store { half, half } %[[RESULT_COMPLEX_F16]], ptr %[[B_ADDR]], align 2
+
+// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[B_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load half, ptr %a.realp, align 2
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load half, ptr %a.imagp, align 2
+// OGCG: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// OGCG: %[[RESULT_REAL_F32:.*]] = fneg float %[[A_REAL_F32]]
+// OGCG: %[[RESULT_IMAG_F32:.*]] = fneg float %[[A_IMAG_F32]]
+// OGCG: %[[RESULT_REAL:.*]] = fptrunc float %[[RESULT_REAL_F32]] to half
+// OGCG: %[[RESULT_IMAG:.*]] = fptrunc float %[[RESULT_IMAG_F32]] to half
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store half %[[RESULT_REAL]], ptr %[[B_REAL_PTR]], align 2
+// OGCG: store half %[[RESULT_IMAG]], ptr %[[B_IMAG_PTR]], align 2
