[flang] Try to lower math intrinsics to math operations first.
This commit changes how math intrinsics are lowered: we first try
to lower them into MLIR operations or libm calls via the
mathOperations table, and only then fall back to pgmath runtime calls.

The pgmath fallback is needed because mathOperations does not
support all of the intrinsics that pgmath supports. The main purpose
of this change is to get rid of the llvmIntrinsics table, so that
we do not have to update both llvmIntrinsics and mathOperations
when adding support for a new intrinsic.

mathOperations lowering should phase out pgmath lowering as more
operations become available (e.g. the power operations being added
in D129809 and D129811, and complex type operations from the
Complex dialect).

Differential Revision: https://reviews.llvm.org/D130129
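
The new lookup order can be pictured with the following minimal,
self-contained sketch. It is illustrative only: the helper name
lowerIntrinsic, the Table alias, and the placeholder table entries are
made up here; the actual logic lives in
IntrinsicLibrary::getRuntimeCallGenerator in the diff below.

// Sketch (not flang code): consult a mathOperations-style table first,
// and fall back to a pgmath-style runtime table only if nothing is found.
#include <cstdio>
#include <map>
#include <optional>
#include <string>

using Table = std::map<std::string, std::string>;

static std::optional<std::string> lowerIntrinsic(const std::string &name,
                                                 const Table &mathOps,
                                                 const Table &pgmath) {
  if (auto it = mathOps.find(name); it != mathOps.end())
    return it->second; // preferred: MLIR math op or libm call
  if (auto it = pgmath.find(name); it != pgmath.end())
    return it->second; // fallback: pgmath runtime call
  return std::nullopt; // intrinsic supported by neither table
}

int main() {
  // Placeholder entries; the real tables map intrinsic names to MLIR
  // operation generators / libm symbols and to pgmath symbols.
  const Table mathOps = {{"cos", "math.cos"}, {"abs", "math.abs"}};
  const Table pgmath = {{"cos", "__fs_cos_1"}, {"pgmath_only", "__fs_foo_1"}};
  std::printf("cos -> %s\n",
              lowerIntrinsic("cos", mathOps, pgmath).value_or("<none>").c_str());
  std::printf("pgmath_only -> %s\n",
              lowerIntrinsic("pgmath_only", mathOps, pgmath)
                  .value_or("<none>").c_str());
  return 0;
}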
vzakhari committed Jul 22, 2022
1 parent fa3c770 commit f5759ad
Showing 19 changed files with 684 additions and 985 deletions.
152 changes: 37 additions & 115 deletions flang/lib/Lower/IntrinsicCall.cpp
@@ -994,37 +994,15 @@ static llvm::cl::opt<bool> outlineAllIntrinsics(
// Math runtime description and matching utility
//===----------------------------------------------------------------------===//

/// Command line option to control how math operations are lowered
/// into MLIR.
/// Going forward, most of the math operations have to be lowered
/// to some MLIR dialect operations or libm calls, if the corresponding
/// MLIR operation is not available or not reasonable to create
/// (e.g. there are no known optimization opportunities for the math
/// operation in MLIR).
///
/// In general, exposing MLIR operations early can potentially enable more
/// MLIR optimizations.
llvm::cl::opt<bool> lowerEarlyToLibCall(
"lower-math-early",
llvm::cl::desc("Controls when to lower Math intrinsics to library calls"),
llvm::cl::init(true));

/// Command line option to modify math runtime behavior used to implement
/// intrinsics. This option applies both to early and late math-lowering modes.
enum MathRuntimeVersion {
fastVersion,
relaxedVersion,
preciseVersion,
llvmOnly
};
enum MathRuntimeVersion { fastVersion, relaxedVersion, preciseVersion };
llvm::cl::opt<MathRuntimeVersion> mathRuntimeVersion(
"math-runtime", llvm::cl::desc("Select math operations' runtime behavior:"),
llvm::cl::values(
clEnumValN(fastVersion, "fast", "use fast runtime behavior"),
clEnumValN(relaxedVersion, "relaxed", "use relaxed runtime behavior"),
clEnumValN(preciseVersion, "precise", "use precise runtime behavior"),
clEnumValN(llvmOnly, "llvm",
"only use LLVM intrinsics (may be incomplete)")),
clEnumValN(preciseVersion, "precise", "use precise runtime behavior")),
llvm::cl::init(fastVersion));

struct RuntimeFunction {
@@ -1271,59 +1249,6 @@ static constexpr MathOperation mathOperations[] = {
{"tanh", "tanh", genF64F64FuncType, genMathOp<mlir::math::TanhOp>},
};

// Note: These are also defined as operations in LLVM dialect. See if this
// can be used and has advantages.
// TODO: remove this table, since the late math lowering should
// replace it and generate proper MLIR operations rather
// than llvm intrinsic calls, which still look like generic
// calls to MLIR and do not enable many optimizations.
// When late math lowering is able to handle all math operations
// described in pgmath.h.inc and in the table below, we can
// switch to it by default.
static constexpr RuntimeFunction llvmIntrinsics[] = {
{"abs", "llvm.fabs.f32", genF32F32FuncType},
{"abs", "llvm.fabs.f64", genF64F64FuncType},
{"abs", "llvm.fabs.f128", genF128F128FuncType},
{"aint", "llvm.trunc.f32", genF32F32FuncType},
{"aint", "llvm.trunc.f64", genF64F64FuncType},
{"anint", "llvm.round.f32", genF32F32FuncType},
{"anint", "llvm.round.f64", genF64F64FuncType},
{"atan", "atanf", genF32F32FuncType},
{"atan", "atan", genF64F64FuncType},
// ceil is used for CEILING but is different, it returns a real.
{"ceil", "llvm.ceil.f32", genF32F32FuncType},
{"ceil", "llvm.ceil.f64", genF64F64FuncType},
{"cos", "llvm.cos.f32", genF32F32FuncType},
{"cos", "llvm.cos.f64", genF64F64FuncType},
{"cosh", "coshf", genF32F32FuncType},
{"cosh", "cosh", genF64F64FuncType},
{"exp", "llvm.exp.f32", genF32F32FuncType},
{"exp", "llvm.exp.f64", genF64F64FuncType},
// llvm.floor is used for FLOOR, but returns real.
{"floor", "llvm.floor.f32", genF32F32FuncType},
{"floor", "llvm.floor.f64", genF64F64FuncType},
{"log", "llvm.log.f32", genF32F32FuncType},
{"log", "llvm.log.f64", genF64F64FuncType},
{"log10", "llvm.log10.f32", genF32F32FuncType},
{"log10", "llvm.log10.f64", genF64F64FuncType},
{"nint", "llvm.lround.i64.f64", genIntF64FuncType<64>},
{"nint", "llvm.lround.i64.f32", genIntF32FuncType<64>},
{"nint", "llvm.lround.i32.f64", genIntF64FuncType<32>},
{"nint", "llvm.lround.i32.f32", genIntF32FuncType<32>},
{"pow", "llvm.pow.f32", genF32F32F32FuncType},
{"pow", "llvm.pow.f64", genF64F64F64FuncType},
{"sign", "llvm.copysign.f32", genF32F32F32FuncType},
{"sign", "llvm.copysign.f64", genF64F64F64FuncType},
{"sign", "llvm.copysign.f80", genF80F80F80FuncType},
{"sign", "llvm.copysign.f128", genF128F128F128FuncType},
{"sin", "llvm.sin.f32", genF32F32FuncType},
{"sin", "llvm.sin.f64", genF64F64FuncType},
{"sinh", "sinhf", genF32F32FuncType},
{"sinh", "sinh", genF64F64FuncType},
{"sqrt", "llvm.sqrt.f32", genF32F32FuncType},
{"sqrt", "llvm.sqrt.f64", genF64F64FuncType},
};

// This helper class computes a "distance" between two function types.
// The distance measures how many narrowing conversions of actual arguments
// and result of "from" must be made in order to use "to" instead of "from".
@@ -1592,35 +1517,19 @@ static mlir::func::FuncOp getRuntimeFunction(mlir::Location loc,
static constexpr RtMap pgmathP(pgmathPrecise);
static_assert(pgmathP.Verify() && "map must be sorted");

if (mathRuntimeVersion == fastVersion) {
if (mathRuntimeVersion == fastVersion)
match = searchFunctionInLibrary(loc, builder, pgmathF, name, funcType,
&bestNearMatch, bestMatchDistance);
} else if (mathRuntimeVersion == relaxedVersion) {
else if (mathRuntimeVersion == relaxedVersion)
match = searchFunctionInLibrary(loc, builder, pgmathR, name, funcType,
&bestNearMatch, bestMatchDistance);
} else if (mathRuntimeVersion == preciseVersion) {
else if (mathRuntimeVersion == preciseVersion)
match = searchFunctionInLibrary(loc, builder, pgmathP, name, funcType,
&bestNearMatch, bestMatchDistance);
} else {
assert(mathRuntimeVersion == llvmOnly && "unknown math runtime");
}
if (match)
return match;

// Go through llvm intrinsics if not exact match in libpgmath or if
// mathRuntimeVersion == llvmOnly
static constexpr RtMap llvmIntr(llvmIntrinsics);
static_assert(llvmIntr.Verify() && "map must be sorted");
if (mlir::func::FuncOp exactMatch =
searchFunctionInLibrary(loc, builder, llvmIntr, name, funcType,
&bestNearMatch, bestMatchDistance))
return exactMatch;

if (bestNearMatch != nullptr) {
checkPrecisionLoss(name, funcType, bestMatchDistance, loc);
return getFuncOp(loc, builder, *bestNearMatch);
}
return {};
else
llvm_unreachable("unsupported mathRuntimeVersion");

return match;
}

/// Helpers to get function type from arguments and result type.
@@ -2010,24 +1919,37 @@ IntrinsicLibrary::getRuntimeCallGenerator(llvm::StringRef name,
mlir::func::FuncOp funcOp;
mlir::FunctionType actualFuncType;
const MathOperation *mathOp = nullptr;
if (!lowerEarlyToLibCall) {
// Look for a dedicated math operation generator, which
// normally produces a single MLIR operation implementing
// the math operation.
// If not found fall back to a runtime function lookup.
const MathOperation *bestNearMatch = nullptr;
FunctionDistance bestMatchDistance;
mathOp = searchMathOperation(builder, name, soughtFuncType, &bestNearMatch,
bestMatchDistance);
if (!mathOp && bestNearMatch) {
// Use the best near match, optionally issuing an error,
// if types conversions cause precision loss.

// Look for a dedicated math operation generator, which
// normally produces a single MLIR operation implementing
// the math operation.
// If not found fall back to a runtime function lookup.
const MathOperation *bestNearMatch = nullptr;
FunctionDistance bestMatchDistance;
mathOp = searchMathOperation(builder, name, soughtFuncType, &bestNearMatch,
bestMatchDistance);
if (!mathOp && bestNearMatch) {
// Use the best near match, optionally issuing an error,
// if types conversions cause precision loss.
bool useBestNearMatch = true;
// TODO: temporary workaround to avoid using math::PowFOp
// for pow(fp, i64) case and fall back to pgmath runtime.
// When proper Math dialect operations are available
// and added into mathOperations table, this can be removed.
// This is WIP in D129812.
if (name == "pow" && soughtFuncType.getInput(0).isa<mlir::FloatType>())
if (auto exponentTy =
soughtFuncType.getInput(1).dyn_cast<mlir::IntegerType>())
useBestNearMatch = exponentTy.getWidth() != 64;

if (useBestNearMatch) {
checkPrecisionLoss(name, soughtFuncType, bestMatchDistance, loc);
mathOp = bestNearMatch;
}
if (mathOp)
actualFuncType = mathOp->typeGenerator(builder.getContext());
}
if (mathOp)
actualFuncType = mathOp->typeGenerator(builder.getContext());

if (!mathOp)
if ((funcOp = getRuntimeFunction(loc, builder, name, soughtFuncType)))
actualFuncType = funcOp.getFunctionType();
@@ -4529,7 +4451,7 @@ mlir::Value Fortran::lower::genPow(fir::FirOpBuilder &builder,
mlir::Value x, mlir::Value y) {
// TODO: since there is no libm version of pow with integer exponent,
// we have to provide an alternative implementation for
// "precise/strict" FP mode and (!lowerEarlyToLibCall).
// "precise/strict" FP mode.
// One option is to generate internal function with inlined
// implementation and mark it 'strictfp'.
// Another option is to implement it in Fortran runtime library
File renamed without changes.
14 changes: 7 additions & 7 deletions flang/test/Lower/Intrinsics/abs.f90
@@ -36,7 +36,7 @@ subroutine abs_testi16(a, b)
subroutine abs_testh(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<f16>
! CHECK: %[[VAL_2_1:.*]] = fir.convert %[[VAL_2]] : (f16) -> f32
! CHECK: %[[VAL_3:.*]] = fir.call @llvm.fabs.f32(%[[VAL_2_1]]) : (f32) -> f32
! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2_1]] : f32
! CHECK: %[[VAL_3_1:.*]] = fir.convert %[[VAL_3]] : (f32) -> f16
! CHECK: fir.store %[[VAL_3_1]] to %[[VAL_1]] : !fir.ref<f16>
! CHECK: return
@@ -49,7 +49,7 @@ subroutine abs_testh(a, b)
subroutine abs_testb(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<bf16>
! CHECK: %[[VAL_2_1:.*]] = fir.convert %[[VAL_2]] : (bf16) -> f32
! CHECK: %[[VAL_3:.*]] = fir.call @llvm.fabs.f32(%[[VAL_2_1]]) : (f32) -> f32
! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2_1]] : f32
! CHECK: %[[VAL_3_1:.*]] = fir.convert %[[VAL_3]] : (f32) -> bf16
! CHECK: fir.store %[[VAL_3_1]] to %[[VAL_1]] : !fir.ref<bf16>
! CHECK: return
@@ -61,7 +61,7 @@ subroutine abs_testb(a, b)
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f32>{{.*}}, %[[VAL_1:.*]]: !fir.ref<f32>{{.*}}) {
subroutine abs_testr(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<f32>
! CHECK: %[[VAL_3:.*]] = fir.call @llvm.fabs.f32(%[[VAL_2]]) : (f32) -> f32
! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2]] : f32
! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<f32>
! CHECK: return
real :: a, b
@@ -72,7 +72,7 @@ subroutine abs_testr(a, b)
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f64>{{.*}}, %[[VAL_1:.*]]: !fir.ref<f64>{{.*}}) {
subroutine abs_testd(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<f64>
! CHECK: %[[VAL_3:.*]] = fir.call @llvm.fabs.f64(%[[VAL_2]]) : (f64) -> f64
! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2]] : f64
! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<f64>
! CHECK: return
real(kind=8) :: a, b
@@ -83,7 +83,7 @@ subroutine abs_testd(a, b)
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f128>{{.*}}, %[[VAL_1:.*]]: !fir.ref<f128>{{.*}}) {
subroutine abs_testr16(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<f128>
! CHECK: %[[VAL_3:.*]] = fir.call @llvm.fabs.f128(%[[VAL_2]]) : (f128) -> f128
! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2]] : f128
! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<f128>
! CHECK: return
real(kind=16) :: a, b
@@ -96,7 +96,7 @@ subroutine abs_testzr(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.complex<4>>
! CHECK: %[[VAL_3:.*]] = fir.extract_value %[[VAL_2]], [0 : index] : (!fir.complex<4>) -> f32
! CHECK: %[[VAL_4:.*]] = fir.extract_value %[[VAL_2]], [1 : index] : (!fir.complex<4>) -> f32
! CHECK: %[[VAL_5:.*]] = fir.call @__mth_i_hypot(%[[VAL_3]], %[[VAL_4]]) : (f32, f32) -> f32
! CHECK: %[[VAL_5:.*]] = fir.call @hypotf(%[[VAL_3]], %[[VAL_4]]) : (f32, f32) -> f32
! CHECK: fir.store %[[VAL_5]] to %[[VAL_1]] : !fir.ref<f32>
! CHECK: return
complex :: a
@@ -110,7 +110,7 @@ subroutine abs_testzd(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.complex<8>>
! CHECK: %[[VAL_3:.*]] = fir.extract_value %[[VAL_2]], [0 : index] : (!fir.complex<8>) -> f64
! CHECK: %[[VAL_4:.*]] = fir.extract_value %[[VAL_2]], [1 : index] : (!fir.complex<8>) -> f64
! CHECK: %[[VAL_5:.*]] = fir.call @__mth_i_dhypot(%[[VAL_3]], %[[VAL_4]]) : (f64, f64) -> f64
! CHECK: %[[VAL_5:.*]] = fir.call @hypot(%[[VAL_3]], %[[VAL_4]]) : (f64, f64) -> f64
! CHECK: fir.store %[[VAL_5]] to %[[VAL_1]] : !fir.ref<f64>
! CHECK: return
complex(kind=8) :: a
4 changes: 2 additions & 2 deletions flang/test/Lower/Intrinsics/anint.f90
@@ -3,7 +3,7 @@
! CHECK-LABEL: anint_test
subroutine anint_test(a, b)
real :: a, b
! CHECK: fir.call @llvm.round.f32
! CHECK: "llvm.intr.round"
b = anint(a)
end subroutine


6 changes: 3 additions & 3 deletions flang/test/Lower/Intrinsics/ceiling.f90
@@ -5,16 +5,16 @@ subroutine ceiling_test1(i, a)
integer :: i
real :: a
i = ceiling(a)
! CHECK: %[[f:.*]] = fir.call @llvm.ceil.f32
! CHECK: %[[f:.*]] = math.ceil %{{.*}} : f32
! CHECK: fir.convert %[[f]] : (f32) -> i32
end subroutine
! CHECK-LABEL: ceiling_test2
subroutine ceiling_test2(i, a)
integer(8) :: i
real :: a
i = ceiling(a, 8)
! CHECK: %[[f:.*]] = fir.call @llvm.ceil.f32
! CHECK: %[[f:.*]] = math.ceil %{{.*}} : f32
! CHECK: fir.convert %[[f]] : (f32) -> i64
end subroutine



4 changes: 2 additions & 2 deletions flang/test/Lower/Intrinsics/exp.f90
@@ -43,12 +43,12 @@ subroutine exp_testcd(a, b)

! CHECK-LABEL: private @fir.exp.f32.f32
! CHECK-SAME: (%[[ARG32_OUTLINE:.*]]: f32) -> f32
! CHECK: %[[RESULT32_OUTLINE:.*]] = fir.call @__fs_exp_1(%[[ARG32_OUTLINE]]) : (f32) -> f32
! CHECK: %[[RESULT32_OUTLINE:.*]] = math.exp %[[ARG32_OUTLINE]] : f32
! CHECK: return %[[RESULT32_OUTLINE]] : f32

! CHECK-LABEL: private @fir.exp.f64.f64
! CHECK-SAME: (%[[ARG64_OUTLINE:.*]]: f64) -> f64
! CHECK: %[[RESULT64_OUTLINE:.*]] = fir.call @__fd_exp_1(%[[ARG64_OUTLINE]]) : (f64) -> f64
! CHECK: %[[RESULT64_OUTLINE:.*]] = math.exp %[[ARG64_OUTLINE]] : f64
! CHECK: return %[[RESULT64_OUTLINE]] : f64

! CHECK-LABEL: private @fir.exp.z4.z4
6 changes: 3 additions & 3 deletions flang/test/Lower/Intrinsics/floor.f90
@@ -5,15 +5,15 @@ subroutine floor_test1(i, a)
integer :: i
real :: a
i = floor(a)
! CHECK: %[[f:.*]] = fir.call @llvm.floor.f32
! CHECK: %[[f:.*]] = math.floor %{{.*}} : f32
! CHECK: fir.convert %[[f]] : (f32) -> i32
end subroutine
! CHECK-LABEL: floor_test2
subroutine floor_test2(i, a)
integer(8) :: i
real :: a
i = floor(a, 8)
! CHECK: %[[f:.*]] = fir.call @llvm.floor.f32
! CHECK: %[[f:.*]] = math.floor %{{.*}} : f32
! CHECK: fir.convert %[[f]] : (f32) -> i64
end subroutine


8 changes: 4 additions & 4 deletions flang/test/Lower/Intrinsics/log.f90
@@ -63,12 +63,12 @@ subroutine log10_testd(a, b)

! CHECK-LABEL: private @fir.log.f32.f32
! CHECK-SAME: (%[[ARG32_OUTLINE:.*]]: f32) -> f32
! CHECK: %[[RESULT32_OUTLINE:.*]] = fir.call @__fs_log_1(%[[ARG32_OUTLINE]]) : (f32) -> f32
! CHECK: %[[RESULT32_OUTLINE:.*]] = math.log %[[ARG32_OUTLINE]] : f32
! CHECK: return %[[RESULT32_OUTLINE]] : f32

! CHECK-LABEL: private @fir.log.f64.f64
! CHECK-SAME: (%[[ARG64_OUTLINE:.*]]: f64) -> f64
! CHECK: %[[RESULT64_OUTLINE:.*]] = fir.call @__fd_log_1(%[[ARG64_OUTLINE]]) : (f64) -> f64
! CHECK: %[[RESULT64_OUTLINE:.*]] = math.log %[[ARG64_OUTLINE]] : f64
! CHECK: return %[[RESULT64_OUTLINE]] : f64

! CHECK-LABEL: private @fir.log.z4.z4
@@ -83,10 +83,10 @@ subroutine log10_testd(a, b)

! CHECK-LABEL: private @fir.log10.f32.f32
! CHECK-SAME: (%[[ARG32_OUTLINE:.*]]: f32) -> f32
! CHECK: %[[RESULT32_OUTLINE:.*]] = fir.call @__fs_log10_1(%[[ARG32_OUTLINE]]) : (f32) -> f32
! CHECK: %[[RESULT32_OUTLINE:.*]] = math.log10 %[[ARG32_OUTLINE]] : f32
! CHECK: return %[[RESULT32_OUTLINE]] : f32

! CHECK-LABEL: private @fir.log10.f64.f64
! CHECK-SAME: (%[[ARG64_OUTLINE:.*]]: f64) -> f64
! CHECK: %[[RESULT64_OUTLINE:.*]] = fir.call @__fd_log10_1(%[[ARG64_OUTLINE]]) : (f64) -> f64
! CHECK: %[[RESULT64_OUTLINE:.*]] = math.log10 %[[ARG64_OUTLINE]] : f64
! CHECK: return %[[RESULT64_OUTLINE]] : f64
16 changes: 6 additions & 10 deletions flang/test/Lower/Intrinsics/math-runtime-options.f90
@@ -4,8 +4,6 @@
! RUN: %flang_fc1 -emit-fir -mllvm -math-runtime=relaxed -mllvm -outline-intrinsics %s -o - | FileCheck %s --check-prefixes="FIR,RELAXED"
! RUN: bbc -emit-fir --math-runtime=precise -outline-intrinsics %s -o - | FileCheck %s --check-prefixes="FIR,PRECISE"
! RUN: %flang_fc1 -emit-fir -mllvm -math-runtime=precise -mllvm -outline-intrinsics %s -o - | FileCheck %s --check-prefixes="FIR,PRECISE"
! RUN: bbc -emit-fir --math-runtime=llvm -outline-intrinsics %s -o - | FileCheck %s --check-prefixes="FIR,LLVM"
! RUN: %flang_fc1 -emit-fir -mllvm -math-runtime=llvm -mllvm -outline-intrinsics %s -o - | FileCheck %s --check-prefixes="FIR,LLVM"

! CHECK-LABEL: cos_testr
subroutine cos_testr(a, b)
@@ -22,12 +20,10 @@ subroutine cos_testd(a, b)
end subroutine

! FIR: @fir.cos.f32.f32(%arg0: f32) -> f32 attributes
! FAST: fir.call @__fs_cos_1(%arg0) : (f32) -> f32
! RELAXED: fir.call @__rs_cos_1(%arg0) : (f32) -> f32
! PRECISE: fir.call @__ps_cos_1(%arg0) : (f32) -> f32
! LLVM: fir.call @llvm.cos.f32(%arg0) : (f32) -> f32
! FAST: math.cos %arg0 : f32
! RELAXED: math.cos %arg0 : f32
! PRECISE: fir.call @cosf(%arg0) : (f32) -> f32
! FIR: @fir.cos.f64.f64(%arg0: f64) -> f64
! FAST: fir.call @__fd_cos_1(%arg0) : (f64) -> f64
! RELAXED: fir.call @__rd_cos_1(%arg0) : (f64) -> f64
! PRECISE: fir.call @__pd_cos_1(%arg0) : (f64) -> f64
! LLVM: fir.call @llvm.cos.f64(%arg0) : (f64) -> f64
! FAST: math.cos %arg0 : f64
! RELAXED: math.cos %arg0 : f64
! PRECISE: fir.call @cos(%arg0) : (f64) -> f64
