20 changes: 20 additions & 0 deletions clang/include/clang/Basic/BuiltinsNVPTX.td
@@ -378,16 +378,24 @@ def __nvvm_fma_rn_sat_f16 : NVPTXBuiltinSMAndPTX<"__fp16(__fp16, __fp16, __fp16)
def __nvvm_fma_rn_ftz_sat_f16 : NVPTXBuiltinSMAndPTX<"__fp16(__fp16, __fp16, __fp16)", SM_53, PTX42>;
def __nvvm_fma_rn_relu_f16 : NVPTXBuiltinSMAndPTX<"__fp16(__fp16, __fp16, __fp16)", SM_80, PTX70>;
def __nvvm_fma_rn_ftz_relu_f16 : NVPTXBuiltinSMAndPTX<"__fp16(__fp16, __fp16, __fp16)", SM_80, PTX70>;
def __nvvm_fma_rn_oob_f16 : NVPTXBuiltinSMAndPTX<"__fp16(__fp16, __fp16, __fp16)", SM_90, PTX81>;
def __nvvm_fma_rn_oob_relu_f16 : NVPTXBuiltinSMAndPTX<"__fp16(__fp16, __fp16, __fp16)", SM_90, PTX81>;
def __nvvm_fma_rn_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>, _Vector<2, __fp16>)", SM_53, PTX42>;
def __nvvm_fma_rn_ftz_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>, _Vector<2, __fp16>)", SM_53, PTX42>;
def __nvvm_fma_rn_sat_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>, _Vector<2, __fp16>)", SM_53, PTX42>;
def __nvvm_fma_rn_ftz_sat_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>, _Vector<2, __fp16>)", SM_53, PTX42>;
def __nvvm_fma_rn_relu_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>, _Vector<2, __fp16>)", SM_80, PTX70>;
def __nvvm_fma_rn_ftz_relu_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>, _Vector<2, __fp16>)", SM_80, PTX70>;
def __nvvm_fma_rn_oob_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>, _Vector<2, __fp16>)", SM_90, PTX81>;
def __nvvm_fma_rn_oob_relu_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>, _Vector<2, __fp16>)", SM_90, PTX81>;
def __nvvm_fma_rn_bf16 : NVPTXBuiltinSMAndPTX<"__bf16(__bf16, __bf16, __bf16)", SM_80, PTX70>;
def __nvvm_fma_rn_relu_bf16 : NVPTXBuiltinSMAndPTX<"__bf16(__bf16, __bf16, __bf16)", SM_80, PTX70>;
def __nvvm_fma_rn_oob_bf16 : NVPTXBuiltinSMAndPTX<"__bf16(__bf16, __bf16, __bf16)", SM_90, PTX81>;
def __nvvm_fma_rn_oob_relu_bf16 : NVPTXBuiltinSMAndPTX<"__bf16(__bf16, __bf16, __bf16)", SM_90, PTX81>;
def __nvvm_fma_rn_bf16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(_Vector<2, __bf16>, _Vector<2, __bf16>, _Vector<2, __bf16>)", SM_80, PTX70>;
def __nvvm_fma_rn_relu_bf16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(_Vector<2, __bf16>, _Vector<2, __bf16>, _Vector<2, __bf16>)", SM_80, PTX70>;
def __nvvm_fma_rn_oob_bf16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(_Vector<2, __bf16>, _Vector<2, __bf16>, _Vector<2, __bf16>)", SM_90, PTX81>;
def __nvvm_fma_rn_oob_relu_bf16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __bf16>(_Vector<2, __bf16>, _Vector<2, __bf16>, _Vector<2, __bf16>)", SM_90, PTX81>;
def __nvvm_fma_rn_ftz_f : NVPTXBuiltin<"float(float, float, float)">;
def __nvvm_fma_rn_f : NVPTXBuiltin<"float(float, float, float)">;
def __nvvm_fma_rz_ftz_f : NVPTXBuiltin<"float(float, float, float)">;
@@ -446,6 +454,11 @@ def __nvvm_rsqrt_approx_d : NVPTXBuiltin<"double(double)">;

// Add

def __nvvm_add_rn_sat_f16 : NVPTXBuiltinSMAndPTX<"__fp16(__fp16, __fp16)", SM_53, PTX42>;
def __nvvm_add_rn_ftz_sat_f16 : NVPTXBuiltinSMAndPTX<"__fp16(__fp16, __fp16)", SM_53, PTX42>;
def __nvvm_add_rn_sat_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>)", SM_53, PTX42>;
def __nvvm_add_rn_ftz_sat_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>)", SM_53, PTX42>;

def __nvvm_add_rn_ftz_f : NVPTXBuiltin<"float(float, float)">;
def __nvvm_add_rn_f : NVPTXBuiltin<"float(float, float)">;
def __nvvm_add_rz_ftz_f : NVPTXBuiltin<"float(float, float)">;
@@ -460,6 +473,13 @@ def __nvvm_add_rz_d : NVPTXBuiltin<"double(double, double)">;
def __nvvm_add_rm_d : NVPTXBuiltin<"double(double, double)">;
def __nvvm_add_rp_d : NVPTXBuiltin<"double(double, double)">;

// Mul

def __nvvm_mul_rn_sat_f16 : NVPTXBuiltinSMAndPTX<"__fp16(__fp16, __fp16)", SM_53, PTX42>;
def __nvvm_mul_rn_ftz_sat_f16 : NVPTXBuiltinSMAndPTX<"__fp16(__fp16, __fp16)", SM_53, PTX42>;
def __nvvm_mul_rn_sat_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>)", SM_53, PTX42>;
def __nvvm_mul_rn_ftz_sat_f16x2 : NVPTXBuiltinSMAndPTX<"_Vector<2, __fp16>(_Vector<2, __fp16>, _Vector<2, __fp16>)", SM_53, PTX42>;

// Convert

def __nvvm_d2f_rn_ftz : NVPTXBuiltin<"float(double)">;
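
Illustrative only, not part of this patch: a minimal CUDA sketch of how the new saturating half-precision builtins declared above might be used. The `__CUDA_ARCH__ >= 530` guard mirrors the SM_53/PTX42 requirement in the TableGen entries; the function name and the fallback path are hypothetical, and the clamp-to-[0.0, 1.0] behavior is assumed from the PTX `.sat` modifier.

// Sketch: saturating f16 add/mul via the new builtins (clang CUDA, sm_53+).
__device__ __fp16 blend_saturated(__fp16 a, __fp16 b) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
  // add.rn.sat / mul.rn.sat are expected to clamp their results to [0.0, 1.0].
  __fp16 s = __nvvm_add_rn_sat_f16(a, b);
  return __nvvm_mul_rn_sat_f16(s, (__fp16)0.5f);
#else
  // Fallback sketch: approximate the computation in float with a manual clamp.
  float r = ((float)a + (float)b) * 0.5f;
  return (__fp16)(r < 0.0f ? 0.0f : (r > 1.0f ? 1.0f : r));
#endif
}
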
55 changes: 55 additions & 0 deletions clang/test/CodeGen/builtins-nvptx.c
@@ -31,6 +31,9 @@
// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_80 -target-feature +ptx81 -DPTX=81 \
// RUN: -disable-llvm-optzns -fcuda-is-device -emit-llvm -o - -x cuda %s \
// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX81_SM80 %s
// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_90 -target-feature +ptx81 -DPTX=81 \
// RUN: -disable-llvm-optzns -fcuda-is-device -emit-llvm -o - -x cuda %s \
// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX81_SM90 %s
// RUN: %clang_cc1 -ffp-contract=off -triple nvptx64-unknown-unknown -target-cpu sm_90 -target-feature +ptx78 -DPTX=78 \
// RUN: -disable-llvm-optzns -fcuda-is-device -emit-llvm -o - -x cuda %s \
// RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK_PTX78_SM90 %s
@@ -1519,3 +1522,55 @@ __device__ void nvvm_min_max_sm86() {
#endif
// CHECK: ret void
}

#define F16 (__fp16)0.1f
#define F16_2 (__fp16)0.2f
#define F16X2 {(__fp16)0.1f, (__fp16)0.1f}
#define F16X2_2 {(__fp16)0.2f, (__fp16)0.2f}

// CHECK-LABEL: nvvm_add_mul_f16_sat
__device__ void nvvm_add_mul_f16_sat() {
// CHECK: call half @llvm.nvvm.add.rn.sat.f16
__nvvm_add_rn_sat_f16(F16, F16_2);
// CHECK: call half @llvm.nvvm.add.rn.ftz.sat.f16
__nvvm_add_rn_ftz_sat_f16(F16, F16_2);
// CHECK: call <2 x half> @llvm.nvvm.add.rn.sat.f16x2
__nvvm_add_rn_sat_f16x2(F16X2, F16X2_2);
// CHECK: call <2 x half> @llvm.nvvm.add.rn.ftz.sat.f16x2
__nvvm_add_rn_ftz_sat_f16x2(F16X2, F16X2_2);

// CHECK: call half @llvm.nvvm.mul.rn.sat.f16
__nvvm_mul_rn_sat_f16(F16, F16_2);
// CHECK: call half @llvm.nvvm.mul.rn.ftz.sat.f16
__nvvm_mul_rn_ftz_sat_f16(F16, F16_2);
// CHECK: call <2 x half> @llvm.nvvm.mul.rn.sat.f16x2
__nvvm_mul_rn_sat_f16x2(F16X2, F16X2_2);
// CHECK: call <2 x half> @llvm.nvvm.mul.rn.ftz.sat.f16x2
__nvvm_mul_rn_ftz_sat_f16x2(F16X2, F16X2_2);

// CHECK: ret void
}

// CHECK-LABEL: nvvm_fma_oob
__device__ void nvvm_fma_oob() {
#if __CUDA_ARCH__ >= 900 && (PTX >= 81)
// CHECK_PTX81_SM90: call half @llvm.nvvm.fma.rn.oob.f16
__nvvm_fma_rn_oob_f16(F16, F16_2, F16_2);
// CHECK_PTX81_SM90: call half @llvm.nvvm.fma.rn.oob.relu.f16
__nvvm_fma_rn_oob_relu_f16(F16, F16_2, F16_2);
// CHECK_PTX81_SM90: call <2 x half> @llvm.nvvm.fma.rn.oob.f16x2
__nvvm_fma_rn_oob_f16x2(F16X2, F16X2_2, F16X2_2);
// CHECK_PTX81_SM90: call <2 x half> @llvm.nvvm.fma.rn.oob.relu.f16x2
__nvvm_fma_rn_oob_relu_f16x2(F16X2, F16X2_2, F16X2_2);

// CHECK_PTX81_SM90: call bfloat @llvm.nvvm.fma.rn.oob.bf16
__nvvm_fma_rn_oob_bf16(BF16, BF16_2, BF16_2);
// CHECK_PTX81_SM90: call bfloat @llvm.nvvm.fma.rn.oob.relu.bf16
__nvvm_fma_rn_oob_relu_bf16(BF16, BF16_2, BF16_2);
// CHECK_PTX81_SM90: call <2 x bfloat> @llvm.nvvm.fma.rn.oob.bf16x2
__nvvm_fma_rn_oob_bf16x2(BF16X2, BF16X2_2, BF16X2_2);
// CHECK_PTX81_SM90: call <2 x bfloat> @llvm.nvvm.fma.rn.oob.relu.bf16x2
__nvvm_fma_rn_oob_relu_bf16x2(BF16X2, BF16X2_2, BF16X2_2);
#endif
// CHECK: ret void
}
58 changes: 50 additions & 8 deletions llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -1490,16 +1490,38 @@ let TargetPrefix = "nvvm" in {
def int_nvvm_fma_rn # ftz # variant # _f16x2 :
PureIntrinsic<[llvm_v2f16_ty],
[llvm_v2f16_ty, llvm_v2f16_ty, llvm_v2f16_ty]>;

def int_nvvm_fma_rn # ftz # variant # _bf16 : NVVMBuiltin,
PureIntrinsic<[llvm_bfloat_ty],
[llvm_bfloat_ty, llvm_bfloat_ty, llvm_bfloat_ty]>;

def int_nvvm_fma_rn # ftz # variant # _bf16x2 : NVVMBuiltin,
PureIntrinsic<[llvm_v2bf16_ty],
[llvm_v2bf16_ty, llvm_v2bf16_ty, llvm_v2bf16_ty]>;
} // ftz
} // variant

foreach relu = ["", "_relu"] in {
def int_nvvm_fma_rn # relu # _bf16 : NVVMBuiltin,
PureIntrinsic<[llvm_bfloat_ty],
[llvm_bfloat_ty, llvm_bfloat_ty, llvm_bfloat_ty]>;

def int_nvvm_fma_rn # relu # _bf16x2 : NVVMBuiltin,
PureIntrinsic<[llvm_v2bf16_ty],
[llvm_v2bf16_ty, llvm_v2bf16_ty, llvm_v2bf16_ty]>;
} // relu

// oob (out-of-bounds) - clamps the result to 0 if either of the operands is
// an OOB NaN value.
foreach relu = ["", "_relu"] in {
def int_nvvm_fma_rn_oob # relu # _f16 : NVVMBuiltin,
PureIntrinsic<[llvm_half_ty],
[llvm_half_ty, llvm_half_ty, llvm_half_ty]>;

def int_nvvm_fma_rn_oob # relu # _f16x2 : NVVMBuiltin,
PureIntrinsic<[llvm_v2f16_ty],
[llvm_v2f16_ty, llvm_v2f16_ty, llvm_v2f16_ty]>;

def int_nvvm_fma_rn_oob # relu # _bf16 : NVVMBuiltin,
PureIntrinsic<[llvm_bfloat_ty],
[llvm_bfloat_ty, llvm_bfloat_ty, llvm_bfloat_ty]>;

def int_nvvm_fma_rn_oob # relu # _bf16x2 : NVVMBuiltin,
PureIntrinsic<[llvm_v2bf16_ty],
[llvm_v2bf16_ty, llvm_v2bf16_ty, llvm_v2bf16_ty]>;
} // relu

foreach rnd = ["rn", "rz", "rm", "rp"] in {
foreach ftz = ["", "_ftz"] in
@@ -1567,6 +1589,15 @@ let TargetPrefix = "nvvm" in {
//
// Add
//
foreach ftz = ["", "_ftz"] in {
def int_nvvm_add_rn # ftz # _sat_f16 : NVVMBuiltin,
PureIntrinsic<[llvm_half_ty], [llvm_half_ty, llvm_half_ty]>;

def int_nvvm_add_rn # ftz # _sat_f16x2 : NVVMBuiltin,
PureIntrinsic<[llvm_v2f16_ty], [llvm_v2f16_ty, llvm_v2f16_ty]>;

} // ftz

let IntrProperties = [IntrNoMem, IntrSpeculatable, Commutative] in {
foreach rnd = ["rn", "rz", "rm", "rp"] in {
foreach ftz = ["", "_ftz"] in
@@ -1578,6 +1609,17 @@ let TargetPrefix = "nvvm" in {
}
}

//
// Mul
//
foreach ftz = ["", "_ftz"] in {
def int_nvvm_mul_rn # ftz # _sat_f16 : NVVMBuiltin,
PureIntrinsic<[llvm_half_ty], [llvm_half_ty, llvm_half_ty]>;

def int_nvvm_mul_rn # ftz # _sat_f16x2 : NVVMBuiltin,
PureIntrinsic<[llvm_v2f16_ty], [llvm_v2f16_ty, llvm_v2f16_ty]>;
} // ftz

//
// Dot Product
//
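
Illustrative only, not part of this patch: a hedged CUDA sketch of the new `fma.rn.oob` intrinsics, whose comment in the hunk above describes clamping the result to 0 when an operand is an OOB NaN. The `sm_90` guard mirrors the SM_90/PTX81 requirement in BuiltinsNVPTX.td; the function name is hypothetical, and the relu behavior (negative results clamped to 0) is assumed from the existing relu variants.

// Sketch: out-of-bounds-aware FMA with relu, guarded for sm_90 (PTX 8.1).
__device__ __fp16 fma_oob_relu(__fp16 a, __fp16 b, __fp16 c) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900
  // Per the intrinsic comment: result is clamped to 0 if an operand is an
  // OOB NaN; the relu variant is assumed to also clamp negative results to 0.
  return __nvvm_fma_rn_oob_relu_f16(a, b, c);
#else
  // Fallback sketch: FMA in float followed by a relu clamp; no OOB handling.
  float r = (float)a * (float)b + (float)c;
  return (__fp16)(r < 0.0f ? 0.0f : r);
#endif
}
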
8 changes: 0 additions & 8 deletions llvm/lib/IR/AutoUpgrade.cpp
@@ -1106,16 +1106,8 @@ static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
return StringSwitch<Intrinsic::ID>(Name)
.Case("bf16", Intrinsic::nvvm_fma_rn_bf16)
.Case("bf16x2", Intrinsic::nvvm_fma_rn_bf16x2)
.Case("ftz.bf16", Intrinsic::nvvm_fma_rn_ftz_bf16)
.Case("ftz.bf16x2", Intrinsic::nvvm_fma_rn_ftz_bf16x2)
.Case("ftz.relu.bf16", Intrinsic::nvvm_fma_rn_ftz_relu_bf16)
.Case("ftz.relu.bf16x2", Intrinsic::nvvm_fma_rn_ftz_relu_bf16x2)
.Case("ftz.sat.bf16", Intrinsic::nvvm_fma_rn_ftz_sat_bf16)
.Case("ftz.sat.bf16x2", Intrinsic::nvvm_fma_rn_ftz_sat_bf16x2)
.Case("relu.bf16", Intrinsic::nvvm_fma_rn_relu_bf16)
.Case("relu.bf16x2", Intrinsic::nvvm_fma_rn_relu_bf16x2)
.Case("sat.bf16", Intrinsic::nvvm_fma_rn_sat_bf16)
.Case("sat.bf16x2", Intrinsic::nvvm_fma_rn_sat_bf16x2)
.Default(Intrinsic::not_intrinsic);

if (Name.consume_front("fmax."))
64 changes: 56 additions & 8 deletions llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -866,14 +866,28 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);

// We have some custom DAG combine patterns for these nodes
setTargetDAGCombine(
{ISD::ADD, ISD::AND, ISD::EXTRACT_VECTOR_ELT,
ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM,
ISD::FMAXIMUM, ISD::FMINIMUM, ISD::FMAXIMUMNUM,
ISD::FMINIMUMNUM, ISD::MUL, ISD::SHL,
ISD::SREM, ISD::UREM, ISD::VSELECT,
ISD::BUILD_VECTOR, ISD::ADDRSPACECAST, ISD::LOAD,
ISD::STORE, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND});
setTargetDAGCombine({ISD::ADD,
ISD::AND,
ISD::EXTRACT_VECTOR_ELT,
ISD::FADD,
ISD::FMAXNUM,
ISD::FMINNUM,
ISD::FMAXIMUM,
ISD::FMINIMUM,
ISD::FMAXIMUMNUM,
ISD::FMINIMUMNUM,
ISD::MUL,
ISD::SHL,
ISD::SREM,
ISD::UREM,
ISD::VSELECT,
ISD::BUILD_VECTOR,
ISD::ADDRSPACECAST,
ISD::LOAD,
ISD::STORE,
ISD::ZERO_EXTEND,
ISD::SIGN_EXTEND,
ISD::INTRINSIC_WO_CHAIN});

// setcc for f16x2 and bf16x2 needs special handling to prevent
// legalizer's attempt to scalarize it due to v2i1 not being legal.
@@ -6504,6 +6518,38 @@ static SDValue sinkProxyReg(SDValue R, SDValue Chain,
}
}

// Combine add.sat(a, fneg(b)) -> sub.sat(a, b)
static SDValue combineAddSatWithNeg(SDNode *N, SelectionDAG &DAG,
unsigned SubOpc) {
SDValue Op2 = N->getOperand(2);

if (Op2.getOpcode() != ISD::FNEG)
return SDValue();

SDLoc DL(N);
return DAG.getNode(SubOpc, DL, N->getValueType(0), N->getOperand(1),
Op2.getOperand(0));
}

static SDValue combineIntrinsicWOChain(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const NVPTXSubtarget &STI) {
unsigned IntID = N->getConstantOperandVal(0);

switch (IntID) {
case Intrinsic::nvvm_add_rn_sat_f16:
return combineAddSatWithNeg(N, DCI.DAG, NVPTXISD::SUB_RN_SAT_F16);
case Intrinsic::nvvm_add_rn_ftz_sat_f16:
return combineAddSatWithNeg(N, DCI.DAG, NVPTXISD::SUB_RN_FTZ_SAT_F16);
case Intrinsic::nvvm_add_rn_sat_f16x2:
return combineAddSatWithNeg(N, DCI.DAG, NVPTXISD::SUB_RN_SAT_F16X2);
case Intrinsic::nvvm_add_rn_ftz_sat_f16x2:
return combineAddSatWithNeg(N, DCI.DAG, NVPTXISD::SUB_RN_FTZ_SAT_F16X2);
default:
return SDValue();
}
}

static SDValue combineProxyReg(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {

@@ -6570,6 +6616,8 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
return combineSTORE(N, DCI, STI);
case ISD::VSELECT:
return PerformVSELECTCombine(N, DCI);
case ISD::INTRINSIC_WO_CHAIN:
return combineIntrinsicWOChain(N, DCI, STI);
}
return SDValue();
}
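
Illustrative only, not part of this patch: a rough CUDA sketch of the source pattern the new `INTRINSIC_WO_CHAIN` combine targets, i.e. `add.rn.sat(a, fneg(b))` becoming the saturating-subtract node. Whether a C-level negation actually reaches instruction selection as an `fneg` of the half operand depends on `__fp16` promotion and the optimization pipeline, so treat this as intent rather than a guaranteed lowering; the function name is hypothetical and the `NVPTXISD::SUB_RN_*` nodes are assumed to be defined elsewhere in this change.

// Sketch: the add.rn.sat(a, -b) pattern that combineAddSatWithNeg rewrites
// into the saturating-subtract node (intended lowering: a single
// sub.rn.sat.f16, assuming the negation survives as an fneg of the half value).
__device__ __fp16 sat_difference(__fp16 a, __fp16 b) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530
  return __nvvm_add_rn_sat_f16(a, -b);
#else
  // Fallback sketch: compute in float and clamp to [0.0, 1.0] manually.
  float r = (float)a - (float)b;
  return (__fp16)(r < 0.0f ? 0.0f : (r > 1.0f ? 1.0f : r));
#endif
}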