45 changes: 32 additions & 13 deletions clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -621,19 +621,27 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   }

   mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
-    return emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus);
+    QualType promotionType = getPromotionType(e->getSubExpr()->getType());
+    mlir::Value result =
+        emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus, promotionType);
+    if (result && !promotionType.isNull())
+      return emitUnPromotedValue(result, e->getType());
+    return result;
   }

   mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
-    return emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus);
+    QualType promotionType = getPromotionType(e->getSubExpr()->getType());
+    mlir::Value result =
+        emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus, promotionType);
+    if (result && !promotionType.isNull())
+      return emitUnPromotedValue(result, e->getType());
+    return result;
   }

   mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
-                                   cir::UnaryOpKind kind) {
+                                   cir::UnaryOpKind kind,
+                                   QualType promotionType) {
     ignoreResultAssign = false;

-    QualType promotionType = getPromotionType(e->getSubExpr()->getType());
-
     mlir::Value operand;
     if (!promotionType.isNull())
       operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
@@ -645,10 +653,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {

     // NOTE: LLVM codegen will lower this directly to either a FNeg
     // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
-    mlir::Value result = emitUnaryOp(e, kind, operand, nsw);
-    if (result && !promotionType.isNull())
-      return emitUnPromotedValue(result, e->getType());
-    return result;
+    return emitUnaryOp(e, kind, operand, nsw);
   }

   mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
@@ -1239,9 +1244,23 @@ mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
     default:
       break;
     }
-  } else if (isa<UnaryOperator>(e)) {
-    cgf.cgm.errorNYI(e->getSourceRange(), "unary operators");
-    return {};
+  } else if (const auto *uo = dyn_cast<UnaryOperator>(e)) {
+    switch (uo->getOpcode()) {
+    case UO_Imag:
+      cgf.cgm.errorNYI(e->getSourceRange(),
+                       "ScalarExprEmitter::emitPromoted unary imag");
+      return {};
+    case UO_Real:
+      cgf.cgm.errorNYI(e->getSourceRange(),
+                       "ScalarExprEmitter::emitPromoted unary real");
+      return {};
+    case UO_Minus:
+      return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType);
+    case UO_Plus:
+      return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Plus, promotionType);
+    default:
+      break;
+    }
   }
   mlir::Value result = Visit(const_cast<Expr *>(e));
   if (result) {
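Taken together, the CIRGenExprScalar.cpp changes make nested unary plus/minus on a promotable type such as _Float16 evaluate in the promoted type (float) and truncate back to _Float16 only at the outermost expression, instead of promoting and un-promoting at every level. A minimal sketch of that semantics in plain C++ (illustrative only, not code from the patch; the function name is hypothetical):

// Promote/compute/unpromote pattern for -(-a) on _Float16; the CIR in the
// new tests below expresses the same shape with cir.cast and cir.unary.
_Float16 negate_twice(_Float16 a) {
  float promoted = static_cast<float>(a); // promote the operand once (fpext)
  float inner = -promoted;                // inner unary minus, computed in float
  float outer = -inner;                   // outer unary minus, still in float
  return static_cast<_Float16>(outer);    // single truncation back to half (fptrunc)
}

The new tests in clang/test/CIR/CodeGen/unary.cpp below check exactly this shape at the CIR level, after lowering to LLVM IR, and against the original (non-CIR) codegen.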
66 changes: 66 additions & 0 deletions clang/test/CIR/CodeGen/unary.cpp
@@ -556,3 +556,69 @@ void test_logical_not() {
// OGCG: %[[D_NOT:.*]] = xor i1 %[[D_BOOL]], true
// OGCG: %[[D_CAST:.*]] = zext i1 %[[D_NOT]] to i8
// OGCG: store i8 %[[D_CAST]], ptr %[[B_ADDR]], align 1

void f16NestedUPlus() {
  _Float16 a;
  _Float16 b = +(+a);
}

// CHECK: cir.func{{.*}} @_Z14f16NestedUPlusv()
// CHECK: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"]
// CHECK: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
// CHECK: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16
// CHECK: %[[A_F32:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.f16), !cir.float
// CHECK: %[[A_PLUS:.*]] = cir.unary(plus, %[[A_F32]]) : !cir.float, !cir.float
// CHECK: %[[RESULT_F32:.*]] = cir.unary(plus, %[[A_PLUS]]) : !cir.float, !cir.float
// CHECK: %[[RESULT:.*]] = cir.cast(floating, %[[RESULT_F32]] : !cir.float), !cir.f16
// CHECK: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>

// LLVM: define{{.*}} void @_Z14f16NestedUPlusv()
// LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2
// LLVM: %[[B_ADDR:.*]] = alloca half, i64 1, align 2
// LLVM: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2
// LLVM: %[[RESULT_F32:.*]] = fpext half %[[TMP_A]] to float
// LLVM: %[[RESULT:.*]] = fptrunc float %[[RESULT_F32]] to half
// LLVM: store half %[[RESULT]], ptr %[[B_ADDR]], align 2

// OGCG: define{{.*}} void @_Z14f16NestedUPlusv()
// OGCG: %[[A_ADDR:.*]] = alloca half, align 2
// OGCG: %[[B_ADDR:.*]] = alloca half, align 2
// OGCG: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2
// OGCG: %[[RESULT_F32:.*]] = fpext half %[[TMP_A]] to float
// OGCG: %[[RESULT:.*]] = fptrunc float %[[RESULT_F32]] to half
// OGCG: store half %[[RESULT]], ptr %[[B_ADDR]], align 2

void f16NestedUMinus() {
  _Float16 a;
  _Float16 b = -(-a);
}

// CHECK: cir.func{{.*}} @_Z15f16NestedUMinusv()
// CHECK: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"]
// CHECK: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
// CHECK: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16
// CHECK: %[[A_F32:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.f16), !cir.float
// CHECK: %[[A_MINUS:.*]] = cir.unary(minus, %[[A_F32]]) : !cir.float, !cir.float
// CHECK: %[[RESULT_F32:.*]] = cir.unary(minus, %[[A_MINUS]]) : !cir.float, !cir.float
// CHECK: %[[RESULT:.*]] = cir.cast(floating, %[[RESULT_F32]] : !cir.float), !cir.f16
// CHECK: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>

// LLVM: define{{.*}} void @_Z15f16NestedUMinusv()
// LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2
// LLVM: %[[B_ADDR:.*]] = alloca half, i64 1, align 2
// LLVM: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2
// LLVM: %[[A_F32:.*]] = fpext half %[[TMP_A]] to float
// LLVM: %[[A_MINUS:.*]] = fneg float %[[A_F32]]
// LLVM: %[[RESULT_F32:.*]] = fneg float %[[A_MINUS]]
// LLVM: %[[RESULT:.*]] = fptrunc float %[[RESULT_F32]] to half
// LLVM: store half %[[RESULT]], ptr %[[B_ADDR]], align 2

// OGCG: define{{.*}} void @_Z15f16NestedUMinusv()
// OGCG: %[[A_ADDR:.*]] = alloca half, align 2
// OGCG: %[[B_ADDR:.*]] = alloca half, align 2
// OGCG: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2
// OGCG: %[[A_F32:.*]] = fpext half %[[TMP_A]] to float
// OGCG: %[[A_MINUS:.*]] = fneg float %[[A_F32]]
// OGCG: %[[RESULT_F32:.*]] = fneg float %[[A_MINUS]]
// OGCG: %[[RESULT:.*]] = fptrunc float %[[RESULT_F32]] to half
// OGCG: store half %[[RESULT]], ptr %[[B_ADDR]], align 2
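
As a usage note, these checks rely on the RUN lines already at the top of unary.cpp, which are outside the quoted hunk: CHECK matches the CIR emitted with -fclangir, LLVM matches the LLVM IR obtained by lowering that CIR, and OGCG matches the IR from the original (non-CIR) codegen so the two paths can be compared. A sketch of a typical RUN header for such a test, assuming the usual flags (the exact lines in the existing file may differ):

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
// RUN: FileCheck --input-file=%t.cir %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG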