Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
120 changes: 119 additions & 1 deletion stdlib/public/Platform/tgmath.swift.gyb
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,22 @@ public func fabs<T: FloatingPoint>(_ x: T) -> T {
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
@differentiable(
// SWIFT_ENABLE_TENSORFLOW
@differentiable(

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

done in 27ca6a2

vjp: _vjpSqrt
where T : Differentiable & FloatingPoint, T == T.TangentVector
)
/// Returns the square root of `x`, computed via `FloatingPoint.squareRoot()`.
public func sqrt<T: FloatingPoint>(_ x: T) -> T {
  let root = x.squareRoot()
  return root
}

@_transparent
// SWIFT_ENABLE_TENSORFLOW
@differentiable(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
@differentiable(
// SWIFT_ENABLE_TENSORFLOW
@differentiable(

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

done in 27ca6a2

wrt: (x, y, z),
vjp: _vjpFma
where T : Differentiable & FloatingPoint, T == T.TangentVector
)
/// Returns `x * y + z` computed as a single fused multiply-add
/// (no intermediate rounding), via `FloatingPoint.addingProduct(_:_:)`.
public func fma<T: FloatingPoint>(_ x: T, _ y: T, _ z: T) -> T {
  let fused = z.addingProduct(x, y)
  return fused
}
Expand Down Expand Up @@ -82,6 +93,24 @@ public func frexp<T: BinaryFloatingPoint>(_ x: T) -> (T, Int) {
return (x.significand / 2, Int(x.exponent + 1))
}

// SWIFT_ENABLE_TENSORFLOW
// VJP for `sqrt`: returns the forward value together with a pullback.
// Since d/dx √x = 1 / (2√x), the pullback divides the incoming cotangent
// by twice the already-computed root (reusing the forward result).
@usableFromInline
func _vjpSqrt<T: FloatingPoint & Differentiable>(
  _ x: T
) -> (T, (T) -> T) where T == T.TangentVector {
  let root = x.squareRoot()
  func pullback(_ cotangent: T) -> T {
    return cotangent / (2 * root)
  }
  return (root, pullback)
}

// VJP for `fma`. fma(x, y, z) = x*y + z, so the partial derivatives are
// (y, x, 1); the pullback scales each by the incoming cotangent.
@usableFromInline
func _vjpFma<T: FloatingPoint & Differentiable>(
  _ x: T,
  _ y: T,
  _ z: T
) -> (T, (T) -> (T, T, T)) where T == T.TangentVector {
  let pullback: (T) -> (T, T, T) = { cotangent in
    (cotangent * y, cotangent * x, cotangent)
  }
  return (fma(x, y, z), pullback)
}

%for T in ['Float','Double']:
@available(swift, deprecated: 4.2, renamed: "scalbn")
@_transparent
Expand All @@ -102,11 +131,27 @@ func _vjpExp(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
return (value, { v in value * v })
}

// VJP for `exp2`: d/dx 2^x = ln(2) · 2^x; the forward result is reused
// inside the pullback.
@usableFromInline
func _vjpExp2(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let result = exp2(x)
  let pullback: (${T}) -> ${T} = { cotangent in
    cotangent * ${T}(M_LN2) * result
  }
  return (result, pullback)
}

// VJP for `log`: d/dx log(x) = 1/x.
@usableFromInline
func _vjpLog(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in cotangent / x }
  return (log(x), pullback)
}

// VJP for `log10`: d/dx log10(x) = log10(e) / x (M_LOG10E = log10(e)).
@usableFromInline
func _vjpLog10(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in
    cotangent * ${T}(M_LOG10E) / x
  }
  return (log10(x), pullback)
}

// VJP for `log2`: d/dx log2(x) = 1 / (ln(2) · x).
@usableFromInline
func _vjpLog2(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in
    cotangent / (${T}(M_LN2) * x)
  }
  return (log2(x), pullback)
}

@usableFromInline
func _vjpSin(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
return (sin(x), { v in v * cos(x) })
Expand All @@ -122,6 +167,72 @@ func _vjpTan(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
let value = tan(x)
return (value, { v in v * (1 + value * value) })
}

// VJP for `asin`: d/dx asin(x) = 1 / sqrt(1 - x²).
@usableFromInline
func _vjpAsin(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in
    cotangent / sqrt(1 - x * x)
  }
  return (asin(x), pullback)
}

// VJP for `acos`: d/dx acos(x) = -1 / sqrt(1 - x²) (negated asin derivative).
@usableFromInline
func _vjpAcos(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in
    -cotangent / sqrt(1 - x * x)
  }
  return (acos(x), pullback)
}

// VJP for `atan`: d/dx atan(x) = 1 / (1 + x²).
@usableFromInline
func _vjpAtan(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in
    cotangent / (1 + x * x)
  }
  return (atan(x), pullback)
}

// VJP for `sinh`: d/dx sinh(x) = cosh(x).
@usableFromInline
func _vjpSinh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in cotangent * cosh(x) }
  return (sinh(x), pullback)
}

// VJP for `cosh`: d/dx cosh(x) = sinh(x).
@usableFromInline
func _vjpCosh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in cotangent * sinh(x) }
  return (cosh(x), pullback)
}

// VJP for `tanh`: d/dx tanh(x) = 1 - tanh(x)²; the forward result is
// reused in the pullback.
@usableFromInline
func _vjpTanh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let result = tanh(x)
  let pullback: (${T}) -> ${T} = { cotangent in
    cotangent * (1 - result * result)
  }
  return (result, pullback)
}

// VJP for `asinh`: d/dx asinh(x) = 1 / sqrt(1 + x²).
@usableFromInline
func _vjpAsinh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in
    cotangent / sqrt(1 + x * x)
  }
  return (asinh(x), pullback)
}

// VJP for `acosh`: d/dx acosh(x) = 1 / sqrt(x² - 1).
@usableFromInline
func _vjpAcosh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in
    cotangent / sqrt(x * x - 1)
  }
  return (acosh(x), pullback)
}

// VJP for `atanh`: d/dx atanh(x) = 1 / (1 - x²).
@usableFromInline
func _vjpAtanh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in
    cotangent / (1 - x * x)
  }
  return (atanh(x), pullback)
}

// VJP for `expm1`: d/dx (eˣ - 1) = eˣ.
@usableFromInline
func _vjpExpm1(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in exp(x) * cotangent }
  return (expm1(x), pullback)
}

// VJP for `log1p`: d/dx log(1 + x) = 1 / (x + 1).
@usableFromInline
func _vjpLog1p(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in cotangent / (x + 1) }
  return (log1p(x), pullback)
}

// VJP for `erf`: d/dx erf(x) = (2/√π) · e^(−x²) (M_2_SQRTPI = 2/√π).
@usableFromInline
func _vjpErf(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in
    cotangent * ${T}(M_2_SQRTPI) * exp(-x * x)
  }
  return (erf(x), pullback)
}

// VJP for `erfc`: erfc = 1 - erf, so the derivative is the negated
// erf derivative: -(2/√π) · e^(−x²).
@usableFromInline
func _vjpErfc(_ x: ${T}) -> (${T}, (${T}) -> ${T}) {
  let pullback: (${T}) -> ${T} = { cotangent in
    cotangent * -${T}(M_2_SQRTPI) * exp(-x * x)
  }
  return (erfc(x), pullback)
}
% if T == 'Float80':
#endif
% end
Expand Down Expand Up @@ -201,7 +312,14 @@ UnaryIntrinsicFunctions = [
]

# SWIFT_ENABLE_TENSORFLOW
# Names of unary intrinsic functions that have a hand-written VJP
# (`_vjp<Name>` above) — presumably consumed by the generation loop to emit
# the `@differentiable` attribute; verify against the gyb template body.
# NOTE(review): the stale pre-change assignment
# `HasVJP = ["exp", "log", "tan", "cos", "sin"]` was leftover diff residue
# shadowed by this list and has been removed.
HasVJP = [
    # inverse trig / trig
    'acos', 'asin', 'atan', 'tan',
    # hyperbolic / inverse hyperbolic
    'acosh', 'asinh', 'atanh', 'cosh', 'sinh', 'tanh',
    # exp/log variants
    'expm1',
    'log1p',
    # error functions
    'erf', 'erfc',
    'cos', 'sin', 'exp', 'exp2', 'log', 'log10', 'log2',
]

def AllFloatTypes():
for bits in allFloatBits:
Expand Down
31 changes: 26 additions & 5 deletions test/stdlib/tgmath.swift.gyb
Original file line number Diff line number Diff line change
Expand Up @@ -247,11 +247,32 @@ MathTests.test("${T}") {
// SWIFT_ENABLE_TENSORFLOW
% for T in ['Float', 'Float80']:
MathTests.test("gradient_${T}") {
  // NOTE(review): five stale pre-change assertion lines (duplicating the
  // exp, log, tan, sin, cos checks with `ulps:16` spacing) were diff residue
  // and are removed; each gradient is asserted exactly once below.
  // 16 ulps of slack absorbs small platform differences in libm.
  expectEqualWithTolerance(7.3890560989306502274, gradient(at: 2.0 as ${T}, in: exp), ulps: 16)
  expectEqualWithTolerance(2.772588722239781145, gradient(at: 2.0 as ${T}, in: exp2), ulps: 16)
  expectEqualWithTolerance(7.3890560989306502274, gradient(at: 2.0 as ${T}, in: expm1), ulps: 16)
  expectEqualWithTolerance(0.5, gradient(at: 2.0 as ${T}, in: log), ulps: 16)
  expectEqualWithTolerance(0.21714724095162590833, gradient(at: 2.0 as ${T}, in: log10), ulps: 16)
  expectEqualWithTolerance(0.7213475204444817278, gradient(at: 2.0 as ${T}, in: log2), ulps: 16)
  expectEqualWithTolerance(0.33333333333333333334, gradient(at: 2.0 as ${T}, in: log1p), ulps: 16)
  expectEqualWithTolerance(5.774399204041917612, gradient(at: 2.0 as ${T}, in: tan), ulps: 16)
  expectEqualWithTolerance(-0.9092974268256816954, gradient(at: 2.0 as ${T}, in: cos), ulps: 16)
  expectEqualWithTolerance(-0.416146836547142387, gradient(at: 2.0 as ${T}, in: sin), ulps: 16)
  expectEqualWithTolerance(1.154700538379251529, gradient(at: 0.5 as ${T}, in: asin), ulps: 16)
  expectEqualWithTolerance(-1.154700538379251529, gradient(at: 0.5 as ${T}, in: acos), ulps: 16)
  expectEqualWithTolerance(0.8, gradient(at: 0.5 as ${T}, in: atan), ulps: 16)
  expectEqualWithTolerance(3.7621956910836314597, gradient(at: 2.0 as ${T}, in: sinh), ulps: 16)
  expectEqualWithTolerance(3.6268604078470187677, gradient(at: 2.0 as ${T}, in: cosh), ulps: 16)
  expectEqualWithTolerance(0.07065082485316446565, gradient(at: 2.0 as ${T}, in: tanh), ulps: 16)
  expectEqualWithTolerance(0.44721359549995793928, gradient(at: 2.0 as ${T}, in: asinh), ulps: 16)
  expectEqualWithTolerance(0.5773502691896257645, gradient(at: 2.0 as ${T}, in: acosh), ulps: 16)
  expectEqualWithTolerance(1.3333333333333333334, gradient(at: 0.5 as ${T}, in: atanh), ulps: 16)
  expectEqualWithTolerance(0.020666985354092053575, gradient(at: 2.0 as ${T}, in: erf), ulps: 16)
  expectEqualWithTolerance(-0.020666985354092053575, gradient(at: 2.0 as ${T}, in: erfc), ulps: 16)
  expectEqualWithTolerance(0.35355339059327376222, gradient(at: 2.0 as ${T}, in: { sqrt($0) }), ulps: 16)
  // fma is ternary: expected gradient of x*y + z at (4, 5, 6) is (y, x, 1).
  let fmaGrad = gradient(at: 4.0 as ${T}, 5.0 as ${T}, 6.0 as ${T}, in: { x, y, z in fma(x, y, z) })
  expectEqualWithTolerance(5.0, fmaGrad.0, ulps: 16)
  expectEqualWithTolerance(4.0, fmaGrad.1, ulps: 16)
  expectEqualWithTolerance(1.0, fmaGrad.2, ulps: 16)
}
%end

Expand Down