Merged
2 changes: 1 addition & 1 deletion src/target/llvm/codegen_llvm.cc
@@ -928,7 +928,7 @@ llvm::Value* CodeGenLLVM::CreateCast(DataType from, DataType to, llvm::Value* va
  } else if (to.is_bool()) {
    if (from.is_float()) {
      llvm::Constant* zero = llvm::ConstantFP::get(DTypeToLLVMType(from), 0.);
-     return builder_->CreateFCmpONE(value, zero);
+     return builder_->CreateFCmpUNE(value, zero);
    } else {
      llvm::Constant* zero = llvm::ConstantInt::get(DTypeToLLVMType(from), 0);
      return builder_->CreateICmpNE(value, zero);
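Note (not part of the diff): LLVM's FCmpONE is the ordered not-equal comparison, which evaluates to false whenever either operand is NaN, so casting NaN to bool previously produced False. FCmpUNE is the unordered variant, which evaluates to true when either operand is NaN, matching Python/NumPy truthiness where only exact zero is falsy. A minimal illustrative sketch of the reference semantics the change targets (not taken from the PR):

import numpy as np

# Reference semantics the codegen change reproduces:
# only exact zero maps to False; 1.0, NaN and inf are all truthy.
for x in [np.float32(0.0), np.float32(1.0), np.float32("nan"), np.float32("inf")]:
    print(float(x), bool(x))  # 0.0 -> False, 1.0 -> True, nan -> True, inf -> True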
24 changes: 24 additions & 0 deletions tests/python/codegen/test_target_codegen_llvm.py
@@ -370,6 +370,30 @@ def main(A: T.Buffer((64,), "int32"), C: T.Buffer((64,), "float32")):
tvm.testing.assert_allclose(c.numpy(), c_np)


@tvm.testing.requires_llvm
def test_llvm_cast_float_to_bool():
    @I.ir_module
    class Module:
        @T.prim_func
        def main(A: T.Buffer((4,), "float32"), C: T.Buffer((4,), "bool")):
            T.func_attr({"tir.noalias": True})
            for i in range(4):
                with T.sblock("C"):
                    v_i = T.axis.spatial(4, i)
                    T.reads(A[v_i])
                    T.writes(C[v_i])
                    C[v_i] = T.Cast("bool", A[v_i])

    n = 4
    f = tvm.compile(Module, target="llvm")
    dev = tvm.cpu(0)
    a = tvm.runtime.tensor(np.array([0.0, 1.0, np.nan, np.inf], dtype="float32"), dev)
    c = tvm.runtime.empty((n,), dtype="bool", device=dev)
    f(a, c)
    c_np = np.array([False, True, True, True], dtype="bool")
    tvm.testing.assert_allclose(c.numpy(), c_np)
Comment on lines +373 to +394
Contributor


medium

This is a great test for verifying the new behavior. To improve test coverage, consider parameterizing it to run against different float types (float16, float32, float64), since the change in codegen_llvm.cc affects all float types.

@tvm.testing.requires_llvm
@pytest.mark.parametrize("dtype", ["float16", "float32", "float64"])
def test_llvm_cast_float_to_bool(dtype):
    @I.ir_module
    class Module:
        @T.prim_func
        def main(A: T.Buffer((4,), dtype), C: T.Buffer((4,), "bool")):
            T.func_attr({"tir.noalias": True})
            for i in range(4):
                with T.sblock("C"):
                    v_i = T.axis.spatial(4, i)
                    T.reads(A[v_i])
                    T.writes(C[v_i])
                    C[v_i] = T.Cast("bool", A[v_i])

    n = 4
    f = tvm.compile(Module, target="llvm")
    dev = tvm.cpu(0)
    a = tvm.runtime.tensor(np.array([0.0, 1.0, np.nan, np.inf], dtype=dtype), dev)
    c = tvm.runtime.empty((n,), dtype="bool", device=dev)
    f(a, c)
    c_np = np.array([False, True, True, True], dtype="bool")
    tvm.testing.assert_allclose(c.numpy(), c_np)



@tvm.testing.requires_llvm
def test_rank_zero():
@I.ir_module