diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 2261e24fe44c2..34438bd7a7732 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -621,19 +621,27 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> { } mlir::Value VisitUnaryPlus(const UnaryOperator *e) { - return emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus); + QualType promotionType = getPromotionType(e->getSubExpr()->getType()); + mlir::Value result = + emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus, promotionType); + if (result && !promotionType.isNull()) + return emitUnPromotedValue(result, e->getType()); + return result; } mlir::Value VisitUnaryMinus(const UnaryOperator *e) { - return emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus); + QualType promotionType = getPromotionType(e->getSubExpr()->getType()); + mlir::Value result = + emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus, promotionType); + if (result && !promotionType.isNull()) + return emitUnPromotedValue(result, e->getType()); + return result; } mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e, - cir::UnaryOpKind kind) { + cir::UnaryOpKind kind, + QualType promotionType) { ignoreResultAssign = false; - - QualType promotionType = getPromotionType(e->getSubExpr()->getType()); - mlir::Value operand; if (!promotionType.isNull()) operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType); @@ -645,10 +653,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> { // NOTE: LLVM codegen will lower this directly to either a FNeg // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
- mlir::Value result = emitUnaryOp(e, kind, operand, nsw); - if (result && !promotionType.isNull()) - return emitUnPromotedValue(result, e->getType()); - return result; + return emitUnaryOp(e, kind, operand, nsw); } mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind, @@ -1239,9 +1244,23 @@ mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e, default: break; } - } else if (isa<UnaryOperator>(e)) { - cgf.cgm.errorNYI(e->getSourceRange(), "unary operators"); - return {}; + } else if (const auto *uo = dyn_cast<UnaryOperator>(e)) { + switch (uo->getOpcode()) { + case UO_Imag: + cgf.cgm.errorNYI(e->getSourceRange(), + "ScalarExprEmitter::emitPromoted unary imag"); + return {}; + case UO_Real: + cgf.cgm.errorNYI(e->getSourceRange(), + "ScalarExprEmitter::emitPromoted unary real"); + return {}; + case UO_Minus: + return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType); + case UO_Plus: + return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Plus, promotionType); + default: + break; + } } mlir::Value result = Visit(const_cast<Expr *>(e)); if (result) { diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index a7c946eaffd03..c37524bc8b2c9 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -556,3 +556,69 @@ void test_logical_not() { // OGCG: %[[D_NOT:.*]] = xor i1 %[[D_BOOL]], true // OGCG: %[[D_CAST:.*]] = zext i1 %[[D_NOT]] to i8 // OGCG: store i8 %[[D_CAST]], ptr %[[B_ADDR]], align 1 + +void f16NestedUPlus() { + _Float16 a; + _Float16 b = +(+a); +} + +// CHECK: cir.func{{.*}} @_Z14f16NestedUPlusv() +// CHECK: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"] +// CHECK: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init] +// CHECK: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16 +// CHECK: %[[A_F32:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.f16), !cir.float +// CHECK: %[[A_PLUS:.*]] = cir.unary(plus, %[[A_F32]]) : !cir.float, !cir.float +// CHECK: %[[RESULT_F32:.*]] = 
cir.unary(plus, %[[A_PLUS]]) : !cir.float, !cir.float +// CHECK: %[[RESULT:.*]] = cir.cast(floating, %[[RESULT_F32]] : !cir.float), !cir.f16 +// CHECK: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16> + +// LLVM: define{{.*}} void @_Z14f16NestedUPlusv() +// LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2 +// LLVM: %[[B_ADDR:.*]] = alloca half, i64 1, align 2 +// LLVM: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2 +// LLVM: %[[RESULT_F32:.*]] = fpext half %[[TMP_A]] to float +// LLVM: %[[RESULT:.*]] = fptrunc float %[[RESULT_F32]] to half +// LLVM: store half %[[RESULT]], ptr %[[B_ADDR]], align 2 + +// OGCG: define{{.*}} void @_Z14f16NestedUPlusv() +// OGCG: %[[A_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[B_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2 +// OGCG: %[[RESULT_F32:.*]] = fpext half %[[TMP_A]] to float +// OGCG: %[[RESULT:.*]] = fptrunc float %[[RESULT_F32]] to half +// OGCG: store half %[[RESULT]], ptr %[[B_ADDR]], align 2 + +void f16NestedUMinus() { + _Float16 a; + _Float16 b = -(-a); +} + +// CHECK: cir.func{{.*}} @_Z15f16NestedUMinusv() +// CHECK: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"] +// CHECK: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init] +// CHECK: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16 +// CHECK: %[[A_F32:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.f16), !cir.float +// CHECK: %[[A_MINUS:.*]] = cir.unary(minus, %[[A_F32]]) : !cir.float, !cir.float +// CHECK: %[[RESULT_F32:.*]] = cir.unary(minus, %[[A_MINUS]]) : !cir.float, !cir.float +// CHECK: %[[RESULT:.*]] = cir.cast(floating, %[[RESULT_F32]] : !cir.float), !cir.f16 +// CHECK: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16> + +// LLVM: define{{.*}} void @_Z15f16NestedUMinusv() +// LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2 +// LLVM: %[[B_ADDR:.*]] = alloca half, i64 1, align 2 +// LLVM: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2 +// LLVM: 
%[[A_F32:.*]] = fpext half %[[TMP_A]] to float +// LLVM: %[[A_MINUS:.*]] = fneg float %[[A_F32]] +// LLVM: %[[RESULT_F32:.*]] = fneg float %[[A_MINUS]] +// LLVM: %[[RESULT:.*]] = fptrunc float %[[RESULT_F32]] to half +// LLVM: store half %[[RESULT]], ptr %[[B_ADDR]], align 2 + +// OGCG: define{{.*}} void @_Z15f16NestedUMinusv() +// OGCG: %[[A_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[B_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2 +// OGCG: %[[A_F32:.*]] = fpext half %[[TMP_A]] to float +// OGCG: %[[A_MINUS:.*]] = fneg float %[[A_F32]] +// OGCG: %[[RESULT_F32:.*]] = fneg float %[[A_MINUS]] +// OGCG: %[[RESULT:.*]] = fptrunc float %[[RESULT_F32]] to half +// OGCG: store half %[[RESULT]], ptr %[[B_ADDR]], align 2