diff --git a/stdlib/public/Platform/tgmath.swift.gyb b/stdlib/public/Platform/tgmath.swift.gyb index 80c7309d6b6d4..0d1526865353d 100644 --- a/stdlib/public/Platform/tgmath.swift.gyb +++ b/stdlib/public/Platform/tgmath.swift.gyb @@ -20,11 +20,22 @@ public func fabs<T : FloatingPoint>(_ x: T) -> T { } @_transparent +// SWIFT_ENABLE_TENSORFLOW +@differentiable( + vjp: _vjpSqrt + where T : Differentiable & FloatingPoint, T == T.TangentVector +) public func sqrt<T : FloatingPoint>(_ x: T) -> T { return x.squareRoot() } @_transparent +// SWIFT_ENABLE_TENSORFLOW +@differentiable( + wrt: (x, y, z), + vjp: _vjpFma + where T : Differentiable & FloatingPoint, T == T.TangentVector +) public func fma<T : FloatingPoint>(_ x: T, _ y: T, _ z: T) -> T { return z.addingProduct(x, y) } @@ -82,6 +93,24 @@ public func frexp<T : BinaryFloatingPoint>(_ x: T) -> (T, Int) { return (x.significand / 2, Int(x.exponent + 1)) } +// SWIFT_ENABLE_TENSORFLOW +@usableFromInline +func _vjpSqrt<T : Differentiable & FloatingPoint>( + _ x: T +) -> (T, (T) -> T) where T == T.TangentVector { + let value = x.squareRoot() + return (value, { v in v / (2 * value) }) +} + +@usableFromInline +func _vjpFma<T : Differentiable & FloatingPoint>( + _ x: T, + _ y: T, + _ z: T +) -> (T, (T) -> (T, T, T)) where T == T.TangentVector { + return (fma(x, y, z), { v in (v * y, v * x, v) }) +} + %for T in ['Float','Double']: @available(swift, deprecated: 4.2, renamed: "scalbn") @_transparent @@ -102,11 +131,27 @@ func _vjpExp(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { return (value, { v in value * v }) } +@usableFromInline +func _vjpExp2(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + let value = exp2(x) + return (value, { v in v * ${T}(M_LN2) * value }) +} + @usableFromInline func _vjpLog(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { return (log(x), { v in v / x }) } +@usableFromInline +func _vjpLog10(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (log10(x), { v in v * ${T}(M_LOG10E) / x }) +} + +@usableFromInline +func _vjpLog2(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (log2(x), { v in v / (${T}(M_LN2) * x) }) +} + @usableFromInline func _vjpSin(_ x: ${T}) -> (${T}, (${T}) -> 
${T}) { return (sin(x), { v in v * cos(x) }) @@ -122,6 +167,72 @@ func _vjpTan(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { let value = tan(x) return (value, { v in v * (1 + value * value) }) } + +@usableFromInline +func _vjpAsin(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (asin(x), { v in v / sqrt(1 - x * x) }) +} + +@usableFromInline +func _vjpAcos(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (acos(x), { v in -v / sqrt(1 - x * x) }) +} + +@usableFromInline +func _vjpAtan(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (atan(x), { v in v / (1 + x * x) }) +} + +@usableFromInline +func _vjpSinh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (sinh(x), { v in v * cosh(x) }) +} + +@usableFromInline +func _vjpCosh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (cosh(x), { v in v * sinh(x) }) +} + +@usableFromInline +func _vjpTanh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + let value = tanh(x) + return (value, { v in v * (1 - value * value) }) +} + +@usableFromInline +func _vjpAsinh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (asinh(x), { v in v / sqrt(1 + x * x) }) +} + +@usableFromInline +func _vjpAcosh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (acosh(x), { v in v / sqrt(x * x - 1) }) +} + +@usableFromInline +func _vjpAtanh(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (atanh(x), { v in v / (1 - x * x) }) +} + +@usableFromInline +func _vjpExpm1(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (expm1(x), { v in exp(x) * v }) +} + +@usableFromInline +func _vjpLog1p(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (log1p(x), { v in v / (x + 1) }) +} + +@usableFromInline +func _vjpErf(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (erf(x), { v in v * ${T}(M_2_SQRTPI) * exp(-x * x) }) +} + +@usableFromInline +func _vjpErfc(_ x: ${T}) -> (${T}, (${T}) -> ${T}) { + return (erfc(x), { v in v * -${T}(M_2_SQRTPI) * exp(-x * x) }) +} % if T == 'Float80': #endif % end @@ -201,7 +312,14 @@ UnaryIntrinsicFunctions = [ ] # SWIFT_ENABLE_TENSORFLOW -HasVJP = ["exp", "log", 
"tan", "cos", "sin"] +HasVJP = [ + 'acos', 'asin', 'atan', 'tan', + 'acosh', 'asinh', 'atanh', 'cosh', 'sinh', 'tanh', + 'expm1', + 'log1p', + 'erf', 'erfc', + 'cos', 'sin', 'exp', 'exp2', 'log', 'log10', 'log2' +] def AllFloatTypes(): for bits in allFloatBits: diff --git a/test/stdlib/tgmath.swift.gyb b/test/stdlib/tgmath.swift.gyb index 88f74a76b2883..65341df93ab14 100644 --- a/test/stdlib/tgmath.swift.gyb +++ b/test/stdlib/tgmath.swift.gyb @@ -247,11 +247,32 @@ MathTests.test("${T}") { // SWIFT_ENABLE_TENSORFLOW % for T in ['Float', 'Float80']: MathTests.test("gradient_${T}") { - expectEqualWithTolerance(7.3890560989306502274, gradient(at: 2.0 as ${T}, in: exp), ulps:16) - expectEqualWithTolerance(0.5, gradient(at: 2.0 as ${T}, in: log), ulps:16) - expectEqualWithTolerance(5.774399204041917612, gradient(at: 2.0 as ${T}, in: tan), ulps:16) - expectEqualWithTolerance(-0.416146836547142387, gradient(at: 2.0 as ${T}, in: sin), ulps:16) - expectEqualWithTolerance(-0.9092974268256816954, gradient(at: 2.0 as ${T}, in: cos), ulps:16) + expectEqualWithTolerance(7.3890560989306502274, gradient(at: 2.0 as ${T}, in: exp), ulps: 16) + expectEqualWithTolerance(2.772588722239781145, gradient(at: 2.0 as ${T}, in: exp2), ulps: 16) + expectEqualWithTolerance(7.3890560989306502274, gradient(at: 2.0 as ${T}, in: expm1), ulps: 16) + expectEqualWithTolerance(0.5, gradient(at: 2.0 as ${T}, in: log), ulps: 16) + expectEqualWithTolerance(0.21714724095162590833, gradient(at: 2.0 as ${T}, in: log10), ulps: 16) + expectEqualWithTolerance(0.7213475204444817278, gradient(at: 2.0 as ${T}, in: log2), ulps: 16) + expectEqualWithTolerance(0.33333333333333333334, gradient(at: 2.0 as ${T}, in: log1p), ulps: 16) + expectEqualWithTolerance(5.774399204041917612, gradient(at: 2.0 as ${T}, in: tan), ulps: 16) + expectEqualWithTolerance(-0.9092974268256816954, gradient(at: 2.0 as ${T}, in: cos), ulps: 16) + expectEqualWithTolerance(-0.416146836547142387, gradient(at: 2.0 as ${T}, in: sin), ulps: 16) + 
expectEqualWithTolerance(1.154700538379251529, gradient(at: 0.5 as ${T}, in: asin), ulps: 16) + expectEqualWithTolerance(-1.154700538379251529, gradient(at: 0.5 as ${T}, in: acos), ulps: 16) + expectEqualWithTolerance(0.8, gradient(at: 0.5 as ${T}, in: atan), ulps: 16) + expectEqualWithTolerance(3.7621956910836314597, gradient(at: 2.0 as ${T}, in: sinh), ulps: 16) + expectEqualWithTolerance(3.6268604078470187677, gradient(at: 2.0 as ${T}, in: cosh), ulps: 16) + expectEqualWithTolerance(0.07065082485316446565, gradient(at: 2.0 as ${T}, in: tanh), ulps: 16) + expectEqualWithTolerance(0.44721359549995793928, gradient(at: 2.0 as ${T}, in: asinh), ulps: 16) + expectEqualWithTolerance(0.5773502691896257645, gradient(at: 2.0 as ${T}, in: acosh), ulps: 16) + expectEqualWithTolerance(1.3333333333333333334, gradient(at: 0.5 as ${T}, in: atanh), ulps: 16) + expectEqualWithTolerance(0.020666985354092053575, gradient(at: 2.0 as ${T}, in: erf), ulps: 16) + expectEqualWithTolerance(-0.020666985354092053575, gradient(at: 2.0 as ${T}, in: erfc), ulps: 16) + expectEqualWithTolerance(0.35355339059327376222, gradient(at: 2.0 as ${T}, in: { sqrt($0) }), ulps: 16) + let fmaGrad = gradient(at: 4.0 as ${T}, 5.0 as ${T}, 6.0 as ${T}, in: { x, y, z in fma(x, y, z) }) + expectEqualWithTolerance(5.0, fmaGrad.0, ulps: 16) + expectEqualWithTolerance(4.0, fmaGrad.1, ulps: 16) + expectEqualWithTolerance(1.0, fmaGrad.2, ulps: 16) } %end