From 7fb96b186feee4b1ee25639759589ff087914569 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Mon, 13 May 2019 14:23:29 -0700 Subject: [PATCH 01/26] Initial broken version. --- stdlib/public/core/SIMDVectorTypes.swift.gyb | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index c24556106fe1f..a977feb231b20 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -185,6 +185,15 @@ extension SIMD${n} where Scalar: BinaryFloatingPoint { } } +extension SIMD${n} : AdditiveArithmetic + /*where SIMD${n} : RangeReplaceableCollection, Scalar : FloatingPoint*/ { + public static var zero: SIMD${n} { + return SIMD${n}() + } +} + +extension SIMD${n} : VectorNumeric where Scalar : AdditiveArithmetic {} + %end extension SIMD3 { From d3182a31ea80d99b14d0c92e09ce3066c5c3dce4 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Mon, 13 May 2019 16:03:21 -0700 Subject: [PATCH 02/26] Add VectorView. --- stdlib/public/core/SIMDVectorTypes.swift.gyb | 129 ++++++++++++++++++- 1 file changed, 124 insertions(+), 5 deletions(-) diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index a977feb231b20..b4389a94a80e1 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -185,14 +185,133 @@ extension SIMD${n} where Scalar: BinaryFloatingPoint { } } -extension SIMD${n} : AdditiveArithmetic - /*where SIMD${n} : RangeReplaceableCollection, Scalar : FloatingPoint*/ { - public static var zero: SIMD${n} { - return SIMD${n}() +extension SIMD${n} + where Scalar : Differentiable, + Scalar.CotangentVector : SIMDScalar, + Scalar.TangentVector : SIMDScalar { + // Same implementation as DifferentiableView in Array.swift + @_fixed_layout // BART_TODO: what's this for? 
+ public struct VectorView : Differentiable { + private var _base: SIMD${n} + + // The viewed SIMD vector + // I'm implementing this as a computed property instead of directly + // exposing `_base` because the `@differentiable` annotation does not make + // the stored property actually differentiable. I think this is a bug. + // Maybe it's related to `@_fixed_layout`? + // TODO: Determine if that is a bug, and fix. + public var base: SIMD${n} { + @differentiable(wrt: self, vjp: _vjpBase) + get { return _base } + _modify { yield &_base } + } + + @usableFromInline + func _vjpBase() -> + (SIMD${n}, (SIMD${n}.CotangentVector) -> CotangentVector) { + return (base, { $0 }) + } + + /// Creates a differentiable view of the given SIMD vector. + @differentiable(wrt: base, vjp: _vjpInit) + public init(_ base: SIMD${n}) { self._base = base } + + @usableFromInline + static func _vjpInit(_ base: SIMD${n}) -> + (SIMD${n}.VectorView, (CotangentVector) -> CotangentVector) { + return (SIMD${n}.VectorView(base), { $0 }) + } + + // MARK: - Differentiable conformance. + + public typealias TangentVector = + SIMD${n}.VectorView + public typealias CotangentVector = + SIMD${n}.VectorView // ??????? 
+ public typealias AllDifferentiableVariables = + SIMD${n}.VectorView + + public var allDifferentiableVariables: AllDifferentiableVariables { + get { + return AllDifferentiableVariables( + base.map { $0.allDifferentiableVariables }) + } + set { + precondition( + base.count == newValue.base.count, + "cannot set \(SIMD${n}).VectorView.AllDifferentiableVariables " + + "with count \(base.count) to " + + "\(SIMD${n}).VectorView.AllDifferentiableVariables with " + + "different count \(newValue.base.count)") + for i in base.indices { + base[i].allDifferentiableVariables = newValue.base[i] + } + } + } + + public func moved(along direction: TangentVector) -> VectorView { + precondition( + base.count == direction.base.count, + "cannot move \(SIMD${n}).VectorView with count \(base.count) along " + + "direction with different count \(direction.base.count)") + return VectorView( + zip(base, direction.base).map { $0.moved(along: $1) }) + } + + public func tangentVector(from cotangentVector: CotangentVector) -> + TangentVector { + precondition( + base.count == cotangentVector.base.count, + "cannot use \(SIMD${n}).VectorView with count \(base.count) to " + + "get tangentVector from cotangentVector with different count " + + "\(cotangentVector.base.count)") + return TangentVector(zip(base, cotangentVector.base).map { + (selfElement, cotangentVectorElement) in + selfElement.tangentVector(from: cotangentVectorElement) + }) + } } } -extension SIMD${n} : VectorNumeric where Scalar : AdditiveArithmetic {} +/// Makes `SIMD${n}` differentiable as the product manifold of `Scalar` +/// multiplied with itself `count` times. +extension SIMD${n} : Differentiable + where Scalar : Differentiable, + Scalar.CotangentVector : SIMDScalar, + Scalar.TangentVector : SIMDScalar { + // In an ideal world, `TangentVector`, `CotangentVector`, and + // `AllDifferentiableVariables` would all be `SIMD${n}`s. 
Unfortunately, we + // can't conform `SIMD${n}` to `AdditiveArithmetic` for `TangentVector` and + // `CotangentVector`, because `SIMD${n}` already has a static `+` method with + // different semantics from `AdditiveArithmetic` `+`. So we use + // `SIMD${n}.VectorView` for all these associated types. + public typealias TangentVector = + SIMD${n}.VectorView + public typealias CotangentVector = + SIMD${n}.VectorView + public typealias AllDifferentiableVariables = + SIMD${n}.VectorView + + public var allDifferentiableVariables: AllDifferentiableVariables { + get { + return VectorView(self).allDifferentiableVariables + } + set { + var view = VectorView(self) + view.allDifferentiableVariables = newValue + self = view.base + } + } + + public func moved(along direction: TangentVector) -> Array { + return VectorView(self).moved(along: direction).base + } + + public func tangentVector(from cotangentVector: CotangentVector) -> + TangentVector { + return VectorView(self).tangentVector(from: cotangentVector) + } +} %end From aa231873f11e8c2dc65a24a76bcae5778303ca0c Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Mon, 13 May 2019 17:43:45 -0700 Subject: [PATCH 03/26] WIP while waiting for swift forums answer. --- stdlib/public/core/SIMDVectorTypes.swift.gyb | 256 ++++++++++--------- 1 file changed, 133 insertions(+), 123 deletions(-) diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index b4389a94a80e1..be6cc607c4545 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -185,133 +185,143 @@ extension SIMD${n} where Scalar: BinaryFloatingPoint { } } -extension SIMD${n} - where Scalar : Differentiable, - Scalar.CotangentVector : SIMDScalar, - Scalar.TangentVector : SIMDScalar { - // Same implementation as DifferentiableView in Array.swift - @_fixed_layout // BART_TODO: what's this for? 
- public struct VectorView : Differentiable { - private var _base: SIMD${n} - - // The viewed SIMD vector - // I'm implementing this as a computed property instead of directly - // exposing `_base` because the `@differentiable` annotation does not make - // the stored property actually differentiable. I think this is a bug. - // Maybe it's related to `@_fixed_layout`? - // TODO: Determine if that is a bug, and fix. - public var base: SIMD${n} { - @differentiable(wrt: self, vjp: _vjpBase) - get { return _base } - _modify { yield &_base } - } - - @usableFromInline - func _vjpBase() -> - (SIMD${n}, (SIMD${n}.CotangentVector) -> CotangentVector) { - return (base, { $0 }) - } - - /// Creates a differentiable view of the given SIMD vector. - @differentiable(wrt: base, vjp: _vjpInit) - public init(_ base: SIMD${n}) { self._base = base } - - @usableFromInline - static func _vjpInit(_ base: SIMD${n}) -> - (SIMD${n}.VectorView, (CotangentVector) -> CotangentVector) { - return (SIMD${n}.VectorView(base), { $0 }) - } - - // MARK: - Differentiable conformance. - - public typealias TangentVector = - SIMD${n}.VectorView - public typealias CotangentVector = - SIMD${n}.VectorView // ??????? 
- public typealias AllDifferentiableVariables = - SIMD${n}.VectorView - - public var allDifferentiableVariables: AllDifferentiableVariables { - get { - return AllDifferentiableVariables( - base.map { $0.allDifferentiableVariables }) - } - set { - precondition( - base.count == newValue.base.count, - "cannot set \(SIMD${n}).VectorView.AllDifferentiableVariables " + - "with count \(base.count) to " + - "\(SIMD${n}).VectorView.AllDifferentiableVariables with " + - "different count \(newValue.base.count)") - for i in base.indices { - base[i].allDifferentiableVariables = newValue.base[i] - } - } - } - - public func moved(along direction: TangentVector) -> VectorView { - precondition( - base.count == direction.base.count, - "cannot move \(SIMD${n}).VectorView with count \(base.count) along " + - "direction with different count \(direction.base.count)") - return VectorView( - zip(base, direction.base).map { $0.moved(along: $1) }) - } - - public func tangentVector(from cotangentVector: CotangentVector) -> - TangentVector { - precondition( - base.count == cotangentVector.base.count, - "cannot use \(SIMD${n}).VectorView with count \(base.count) to " + - "get tangentVector from cotangentVector with different count " + - "\(cotangentVector.base.count)") - return TangentVector(zip(base, cotangentVector.base).map { - (selfElement, cotangentVectorElement) in - selfElement.tangentVector(from: cotangentVectorElement) - }) - } +// Don't think I need to conform Scalar to SIMDScalar since it already is being done +// it seems in SIMDVectorTypes.swift +extension SIMD${n} : AdditiveArithmetic where Scalar: FloatingPoint /*& SIMDScalar*/ { + public static var zero: SIMD${n} { + return SIMD${n}() } } -/// Makes `SIMD${n}` differentiable as the product manifold of `Scalar` -/// multiplied with itself `count` times. 
-extension SIMD${n} : Differentiable - where Scalar : Differentiable, - Scalar.CotangentVector : SIMDScalar, - Scalar.TangentVector : SIMDScalar { - // In an ideal world, `TangentVector`, `CotangentVector`, and - // `AllDifferentiableVariables` would all be `SIMD${n}`s. Unfortunately, we - // can't conform `SIMD${n}` to `AdditiveArithmetic` for `TangentVector` and - // `CotangentVector`, because `SIMD${n}` already has a static `+` method with - // different semantics from `AdditiveArithmetic` `+`. So we use - // `SIMD${n}.VectorView` for all these associated types. - public typealias TangentVector = - SIMD${n}.VectorView - public typealias CotangentVector = - SIMD${n}.VectorView - public typealias AllDifferentiableVariables = - SIMD${n}.VectorView - - public var allDifferentiableVariables: AllDifferentiableVariables { - get { - return VectorView(self).allDifferentiableVariables - } - set { - var view = VectorView(self) - view.allDifferentiableVariables = newValue - self = view.base - } - } - - public func moved(along direction: TangentVector) -> Array { - return VectorView(self).moved(along: direction).base - } +//extension SIMD${n} : VectorNumeric where Scalar : AdditiveArithmetic & FloatingPoint {} - public func tangentVector(from cotangentVector: CotangentVector) -> - TangentVector { - return VectorView(self).tangentVector(from: cotangentVector) - } -} +//extension SIMD${n} +// where Scalar : Differentiable, +// Scalar.CotangentVector : SIMDScalar, +// Scalar.TangentVector : SIMDScalar { +// // Same implementation as DifferentiableView in Array.swift +// @_fixed_layout // BART_TODO: what's this for? +// public struct VectorView : Differentiable { +// private var _base: SIMD${n} +// +// // The viewed SIMD vector +// // I'm implementing this as a computed property instead of directly +// // exposing `_base` because the `@differentiable` annotation does not make +// // the stored property actually differentiable. I think this is a bug. 
+// // Maybe it's related to `@_fixed_layout`? +// // TODO: Determine if that is a bug, and fix. +// public var base: SIMD${n} { +// @differentiable(wrt: self, vjp: _vjpBase) +// get { return _base } +// _modify { yield &_base } +// } +// +// @usableFromInline +// func _vjpBase() -> +// (SIMD${n}, (SIMD${n}.CotangentVector) -> CotangentVector) { +// return (base, { $0 }) +// } +// +// /// Creates a differentiable view of the given SIMD vector. +// @differentiable(wrt: base, vjp: _vjpInit) +// public init(_ base: SIMD${n}) { self._base = base } +// +// @usableFromInline +// static func _vjpInit(_ base: SIMD${n}) -> +// (SIMD${n}.VectorView, (CotangentVector) -> CotangentVector) { +// return (SIMD${n}.VectorView(base), { $0 }) +// } +// +// // MARK: - Differentiable conformance. +// +// public typealias TangentVector = +// SIMD${n}.VectorView +// public typealias CotangentVector = +// SIMD${n}.VectorView // ??????? +// public typealias AllDifferentiableVariables = +// SIMD${n}.VectorView +// +// public var allDifferentiableVariables: AllDifferentiableVariables { +// get { +// return AllDifferentiableVariables( +// base.map { $0.allDifferentiableVariables }) +// } +// set { +// precondition( +// base.count == newValue.base.count, +// "cannot set \(SIMD${n}).VectorView.AllDifferentiableVariables " + +// "with count \(base.count) to " + +// "\(SIMD${n}).VectorView.AllDifferentiableVariables with " + +// "different count \(newValue.base.count)") +// for i in base.indices { +// base[i].allDifferentiableVariables = newValue.base[i] +// } +// } +// } +// +// public func moved(along direction: TangentVector) -> VectorView { +// precondition( +// base.count == direction.base.count, +// "cannot move \(SIMD${n}).VectorView with count \(base.count) along " + +// "direction with different count \(direction.base.count)") +// return VectorView( +// zip(base, direction.base).map { $0.moved(along: $1) }) +// } +// +// public func tangentVector(from cotangentVector: CotangentVector) 
-> +// TangentVector { +// precondition( +// base.count == cotangentVector.base.count, +// "cannot use \(SIMD${n}).VectorView with count \(base.count) to " + +// "get tangentVector from cotangentVector with different count " + +// "\(cotangentVector.base.count)") +// return TangentVector(zip(base, cotangentVector.base).map { +// (selfElement, cotangentVectorElement) in +// selfElement.tangentVector(from: cotangentVectorElement) +// }) +// } +// } +//} +// +///// Makes `SIMD${n}` differentiable as the product manifold of `Scalar` +///// multiplied with itself `count` times. +//extension SIMD${n} : Differentiable +// where Scalar : Differentiable, +// Scalar.CotangentVector : SIMDScalar, +// Scalar.TangentVector : SIMDScalar { +// // In an ideal world, `TangentVector`, `CotangentVector`, and +// // `AllDifferentiableVariables` would all be `SIMD${n}`s. Unfortunately, we +// // can't conform `SIMD${n}` to `AdditiveArithmetic` for `TangentVector` and +// // `CotangentVector`, because `SIMD${n}` already has a static `+` method with +// // different semantics from `AdditiveArithmetic` `+`. So we use +// // `SIMD${n}.VectorView` for all these associated types. 
+// public typealias TangentVector = +// SIMD${n}.VectorView +// public typealias CotangentVector = +// SIMD${n}.VectorView +// public typealias AllDifferentiableVariables = +// SIMD${n}.VectorView +// +// public var allDifferentiableVariables: AllDifferentiableVariables { +// get { +// return VectorView(self).allDifferentiableVariables +// } +// set { +// var view = VectorView(self) +// view.allDifferentiableVariables = newValue +// self = view.base +// } +// } +// +// public func moved(along direction: TangentVector) -> Array { +// return VectorView(self).moved(along: direction).base +// } +// +// public func tangentVector(from cotangentVector: CotangentVector) -> +// TangentVector { +// return VectorView(self).tangentVector(from: cotangentVector) +// } +//} %end From fe383628a7b0d5fcb908ab5a2ade8ee53750623b Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Tue, 14 May 2019 12:45:32 -0700 Subject: [PATCH 04/26] WIP: removed from AdditiveArithmetic, kept in CIMD. --- stdlib/public/core/Array.swift | 8 ++ stdlib/public/core/AutoDiff.swift | 20 ++- stdlib/public/core/Integers.swift | 18 +-- stdlib/public/core/SIMDVectorTypes.swift.gyb | 138 +------------------ 4 files changed, 41 insertions(+), 143 deletions(-) diff --git a/stdlib/public/core/Array.swift b/stdlib/public/core/Array.swift index afa4ce43e52a8..f2bf72ca531a1 100644 --- a/stdlib/public/core/Array.swift +++ b/stdlib/public/core/Array.swift @@ -2054,6 +2054,14 @@ extension Array.DifferentiableView : AdditiveArithmetic } return Array.DifferentiableView(zip(lhs.base, rhs.base).map(-)) } + + public static func +=(lhs: inout Self, rhs: Self) { + lhs = lhs + rhs + } + + public static func -=(lhs: inout Self, rhs: Self) { + lhs = lhs - rhs + } } /// Makes `Array` differentiable as the product manifold of `Element` diff --git a/stdlib/public/core/AutoDiff.swift b/stdlib/public/core/AutoDiff.swift index 62b35c30f1026..eb612725fd1d8 100644 --- a/stdlib/public/core/AutoDiff.swift +++ 
b/stdlib/public/core/AutoDiff.swift @@ -718,7 +718,17 @@ public struct AnyDerivative : Differentiable & AdditiveArithmetic { /// Internal struct representing an opaque zero value. @_fixed_layout @usableFromInline - internal struct OpaqueZero : Differentiable & AdditiveArithmetic {} + internal struct OpaqueZero : Differentiable & AdditiveArithmetic { + @usableFromInline + static func +=(lhs: inout Self, rhs: Self) { + lhs = lhs + rhs + } + + @usableFromInline + static func -=(lhs: inout Self, rhs: Self) { + lhs = lhs - rhs + } + } public static var zero: AnyDerivative { return AnyDerivative( @@ -752,6 +762,14 @@ public struct AnyDerivative : Differentiable & AdditiveArithmetic { pullback: (AnyDerivative) -> (AnyDerivative, AnyDerivative)) { return (lhs - rhs, { v in (v, .zero - v) }) } + + public static func +=(lhs: inout Self, rhs: Self) { + lhs = lhs + rhs + } + + public static func -=(lhs: inout Self, rhs: Self) { + lhs = lhs - rhs + } // `Differentiable` requirements. public var allDifferentiableVariables: AllDifferentiableVariables { diff --git a/stdlib/public/core/Integers.swift b/stdlib/public/core/Integers.swift index df46cf72e94b2..3456506dfa4e3 100644 --- a/stdlib/public/core/Integers.swift +++ b/stdlib/public/core/Integers.swift @@ -128,15 +128,15 @@ public protocol AdditiveArithmetic : Equatable { static func -=(lhs: inout Self, rhs: Self) } -public extension AdditiveArithmetic { - static func +=(lhs: inout Self, rhs: Self) { - lhs = lhs + rhs - } - - static func -=(lhs: inout Self, rhs: Self) { - lhs = lhs - rhs - } -} +//public extension AdditiveArithmetic { +// static func +=(lhs: inout Self, rhs: Self) { +// lhs = lhs + rhs +// } +// +// static func -=(lhs: inout Self, rhs: Self) { +// lhs = lhs - rhs +// } +//} public extension AdditiveArithmetic where Self : ExpressibleByIntegerLiteral { /// The zero value. 
diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index be6cc607c4545..55f2db9659a97 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -187,142 +187,14 @@ extension SIMD${n} where Scalar: BinaryFloatingPoint { // Don't think I need to conform Scalar to SIMDScalar since it already is being done // it seems in SIMDVectorTypes.swift -extension SIMD${n} : AdditiveArithmetic where Scalar: FloatingPoint /*& SIMDScalar*/ { - public static var zero: SIMD${n} { - return SIMD${n}() - } -} - -//extension SIMD${n} : VectorNumeric where Scalar : AdditiveArithmetic & FloatingPoint {} - -//extension SIMD${n} -// where Scalar : Differentiable, -// Scalar.CotangentVector : SIMDScalar, -// Scalar.TangentVector : SIMDScalar { -// // Same implementation as DifferentiableView in Array.swift -// @_fixed_layout // BART_TODO: what's this for? -// public struct VectorView : Differentiable { -// private var _base: SIMD${n} -// -// // The viewed SIMD vector -// // I'm implementing this as a computed property instead of directly -// // exposing `_base` because the `@differentiable` annotation does not make -// // the stored property actually differentiable. I think this is a bug. -// // Maybe it's related to `@_fixed_layout`? -// // TODO: Determine if that is a bug, and fix. -// public var base: SIMD${n} { -// @differentiable(wrt: self, vjp: _vjpBase) -// get { return _base } -// _modify { yield &_base } -// } -// -// @usableFromInline -// func _vjpBase() -> -// (SIMD${n}, (SIMD${n}.CotangentVector) -> CotangentVector) { -// return (base, { $0 }) -// } -// -// /// Creates a differentiable view of the given SIMD vector. 
-// @differentiable(wrt: base, vjp: _vjpInit) -// public init(_ base: SIMD${n}) { self._base = base } -// -// @usableFromInline -// static func _vjpInit(_ base: SIMD${n}) -> -// (SIMD${n}.VectorView, (CotangentVector) -> CotangentVector) { -// return (SIMD${n}.VectorView(base), { $0 }) -// } -// -// // MARK: - Differentiable conformance. -// -// public typealias TangentVector = -// SIMD${n}.VectorView -// public typealias CotangentVector = -// SIMD${n}.VectorView // ??????? -// public typealias AllDifferentiableVariables = -// SIMD${n}.VectorView -// -// public var allDifferentiableVariables: AllDifferentiableVariables { -// get { -// return AllDifferentiableVariables( -// base.map { $0.allDifferentiableVariables }) -// } -// set { -// precondition( -// base.count == newValue.base.count, -// "cannot set \(SIMD${n}).VectorView.AllDifferentiableVariables " + -// "with count \(base.count) to " + -// "\(SIMD${n}).VectorView.AllDifferentiableVariables with " + -// "different count \(newValue.base.count)") -// for i in base.indices { -// base[i].allDifferentiableVariables = newValue.base[i] -// } -// } -// } -// -// public func moved(along direction: TangentVector) -> VectorView { -// precondition( -// base.count == direction.base.count, -// "cannot move \(SIMD${n}).VectorView with count \(base.count) along " + -// "direction with different count \(direction.base.count)") -// return VectorView( -// zip(base, direction.base).map { $0.moved(along: $1) }) -// } -// -// public func tangentVector(from cotangentVector: CotangentVector) -> -// TangentVector { -// precondition( -// base.count == cotangentVector.base.count, -// "cannot use \(SIMD${n}).VectorView with count \(base.count) to " + -// "get tangentVector from cotangentVector with different count " + -// "\(cotangentVector.base.count)") -// return TangentVector(zip(base, cotangentVector.base).map { -// (selfElement, cotangentVectorElement) in -// selfElement.tangentVector(from: cotangentVectorElement) -// }) -// } -// 
} -//} -// -///// Makes `SIMD${n}` differentiable as the product manifold of `Scalar` -///// multiplied with itself `count` times. -//extension SIMD${n} : Differentiable -// where Scalar : Differentiable, -// Scalar.CotangentVector : SIMDScalar, -// Scalar.TangentVector : SIMDScalar { -// // In an ideal world, `TangentVector`, `CotangentVector`, and -// // `AllDifferentiableVariables` would all be `SIMD${n}`s. Unfortunately, we -// // can't conform `SIMD${n}` to `AdditiveArithmetic` for `TangentVector` and -// // `CotangentVector`, because `SIMD${n}` already has a static `+` method with -// // different semantics from `AdditiveArithmetic` `+`. So we use -// // `SIMD${n}.VectorView` for all these associated types. -// public typealias TangentVector = -// SIMD${n}.VectorView -// public typealias CotangentVector = -// SIMD${n}.VectorView -// public typealias AllDifferentiableVariables = -// SIMD${n}.VectorView -// -// public var allDifferentiableVariables: AllDifferentiableVariables { -// get { -// return VectorView(self).allDifferentiableVariables -// } -// set { -// var view = VectorView(self) -// view.allDifferentiableVariables = newValue -// self = view.base -// } -// } -// -// public func moved(along direction: TangentVector) -> Array { -// return VectorView(self).moved(along: direction).base -// } -// -// public func tangentVector(from cotangentVector: CotangentVector) -> -// TangentVector { -// return VectorView(self).tangentVector(from: cotangentVector) +//extension SIMD${n} : AdditiveArithmetic where Scalar: BinaryFloatingPoint { +// public static var zero: SIMD${n} { +// return SIMD${n}() // } //} +//extension SIMD${n} : VectorNumeric where Scalar : AdditiveArithmetic & FloatingPoint {} + %end extension SIMD3 { From 5f109a035080de063e171f65d293737113e08720 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Tue, 14 May 2019 12:55:38 -0700 Subject: [PATCH 05/26] Remove += from SIMD protocol, move it to Struct SIMDn. 
--- stdlib/public/core/Array.swift | 8 -------- stdlib/public/core/AutoDiff.swift | 20 +------------------- stdlib/public/core/Integers.swift | 18 +++++++++--------- stdlib/public/core/SIMDVector.swift | 10 ---------- stdlib/public/core/SIMDVectorTypes.swift.gyb | 10 +++++----- 5 files changed, 15 insertions(+), 51 deletions(-) diff --git a/stdlib/public/core/Array.swift b/stdlib/public/core/Array.swift index f2bf72ca531a1..afa4ce43e52a8 100644 --- a/stdlib/public/core/Array.swift +++ b/stdlib/public/core/Array.swift @@ -2054,14 +2054,6 @@ extension Array.DifferentiableView : AdditiveArithmetic } return Array.DifferentiableView(zip(lhs.base, rhs.base).map(-)) } - - public static func +=(lhs: inout Self, rhs: Self) { - lhs = lhs + rhs - } - - public static func -=(lhs: inout Self, rhs: Self) { - lhs = lhs - rhs - } } /// Makes `Array` differentiable as the product manifold of `Element` diff --git a/stdlib/public/core/AutoDiff.swift b/stdlib/public/core/AutoDiff.swift index eb612725fd1d8..62b35c30f1026 100644 --- a/stdlib/public/core/AutoDiff.swift +++ b/stdlib/public/core/AutoDiff.swift @@ -718,17 +718,7 @@ public struct AnyDerivative : Differentiable & AdditiveArithmetic { /// Internal struct representing an opaque zero value. 
@_fixed_layout @usableFromInline - internal struct OpaqueZero : Differentiable & AdditiveArithmetic { - @usableFromInline - static func +=(lhs: inout Self, rhs: Self) { - lhs = lhs + rhs - } - - @usableFromInline - static func -=(lhs: inout Self, rhs: Self) { - lhs = lhs - rhs - } - } + internal struct OpaqueZero : Differentiable & AdditiveArithmetic {} public static var zero: AnyDerivative { return AnyDerivative( @@ -762,14 +752,6 @@ public struct AnyDerivative : Differentiable & AdditiveArithmetic { pullback: (AnyDerivative) -> (AnyDerivative, AnyDerivative)) { return (lhs - rhs, { v in (v, .zero - v) }) } - - public static func +=(lhs: inout Self, rhs: Self) { - lhs = lhs + rhs - } - - public static func -=(lhs: inout Self, rhs: Self) { - lhs = lhs - rhs - } // `Differentiable` requirements. public var allDifferentiableVariables: AllDifferentiableVariables { diff --git a/stdlib/public/core/Integers.swift b/stdlib/public/core/Integers.swift index 3456506dfa4e3..df46cf72e94b2 100644 --- a/stdlib/public/core/Integers.swift +++ b/stdlib/public/core/Integers.swift @@ -128,15 +128,15 @@ public protocol AdditiveArithmetic : Equatable { static func -=(lhs: inout Self, rhs: Self) } -//public extension AdditiveArithmetic { -// static func +=(lhs: inout Self, rhs: Self) { -// lhs = lhs + rhs -// } -// -// static func -=(lhs: inout Self, rhs: Self) { -// lhs = lhs - rhs -// } -//} +public extension AdditiveArithmetic { + static func +=(lhs: inout Self, rhs: Self) { + lhs = lhs + rhs + } + + static func -=(lhs: inout Self, rhs: Self) { + lhs = lhs - rhs + } +} public extension AdditiveArithmetic where Self : ExpressibleByIntegerLiteral { /// The zero value. 
diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index 54fcc55d79165..b1277657310ac 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -1201,16 +1201,6 @@ extension SIMD where Scalar: FloatingPoint { return lhs / Self(repeating: rhs) } - @_transparent - public static func +=(lhs: inout Self, rhs: Self) { - lhs = lhs + rhs - } - - @_transparent - public static func -=(lhs: inout Self, rhs: Self) { - lhs = lhs - rhs - } - @_transparent public static func *=(lhs: inout Self, rhs: Self) { lhs = lhs * rhs diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index 55f2db9659a97..7002b52ac2f28 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -187,11 +187,11 @@ extension SIMD${n} where Scalar: BinaryFloatingPoint { // Don't think I need to conform Scalar to SIMDScalar since it already is being done // it seems in SIMDVectorTypes.swift -//extension SIMD${n} : AdditiveArithmetic where Scalar: BinaryFloatingPoint { -// public static var zero: SIMD${n} { -// return SIMD${n}() -// } -//} +extension SIMD${n} : AdditiveArithmetic where Scalar: BinaryFloatingPoint { + public static var zero: SIMD${n} { + return SIMD${n}() + } +} //extension SIMD${n} : VectorNumeric where Scalar : AdditiveArithmetic & FloatingPoint {} From 304026b9f0c73f758ad489315aac3be409a7c73a Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Tue, 14 May 2019 15:16:43 -0700 Subject: [PATCH 06/26] Have SIMDn conform to Differentiable. 
--- stdlib/public/core/SIMDVector.swift | 5 ----- stdlib/public/core/SIMDVectorTypes.swift.gyb | 15 +++++++++++---- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index b1277657310ac..56d5630036ebc 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -1221,11 +1221,6 @@ extension SIMD where Scalar: FloatingPoint { lhs = lhs - rhs } - @_transparent - public static func *=(lhs: inout Self, rhs: Scalar) { - lhs = lhs * rhs - } - @_transparent public static func /=(lhs: inout Self, rhs: Scalar) { lhs = lhs / rhs diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index 7002b52ac2f28..1065ba075f78a 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -185,15 +185,22 @@ extension SIMD${n} where Scalar: BinaryFloatingPoint { } } -// Don't think I need to conform Scalar to SIMDScalar since it already is being done -// it seems in SIMDVectorTypes.swift -extension SIMD${n} : AdditiveArithmetic where Scalar: BinaryFloatingPoint { +extension SIMD${n} : AdditiveArithmetic where Scalar: FloatingPoint { public static var zero: SIMD${n} { return SIMD${n}() } } -//extension SIMD${n} : VectorNumeric where Scalar : AdditiveArithmetic & FloatingPoint {} +extension SIMD${n} : VectorNumeric where Scalar : FloatingPoint {} + +extension SIMD${n} : Differentiable where Scalar : Differentiable & FloatingPoint { + public typealias TangentVector = SIMD${n} + public typealias CotangentVector = SIMD${n} + public typealias AllDifferentiableVariables = SIMD${n} + public func tangentVector(from cotangent: CotangentVector) -> TangentVector { + return cotangent + } +} %end From b75db945d87e11368992d62e65316731906a6e47 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Tue, 14 May 2019 15:56:53 -0700 Subject: [PATCH 07/26] Forgot to remove zero, already 
implemented in SIMDVector.swift --- stdlib/public/core/SIMDVectorTypes.swift.gyb | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index 1065ba075f78a..ea1cf8d482487 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -185,11 +185,7 @@ extension SIMD${n} where Scalar: BinaryFloatingPoint { } } -extension SIMD${n} : AdditiveArithmetic where Scalar: FloatingPoint { - public static var zero: SIMD${n} { - return SIMD${n}() - } -} +extension SIMD${n} : AdditiveArithmetic where Scalar: FloatingPoint {} extension SIMD${n} : VectorNumeric where Scalar : FloatingPoint {} From d67acef011b88275417ec7db4d4decb6297c0404 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Tue, 14 May 2019 16:55:25 -0700 Subject: [PATCH 08/26] PR fix: spacing. --- stdlib/public/core/SIMDVectorTypes.swift.gyb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index ea1cf8d482487..6f114419228fa 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -185,7 +185,7 @@ extension SIMD${n} where Scalar: BinaryFloatingPoint { } } -extension SIMD${n} : AdditiveArithmetic where Scalar: FloatingPoint {} +extension SIMD${n} : AdditiveArithmetic where Scalar : FloatingPoint {} extension SIMD${n} : VectorNumeric where Scalar : FloatingPoint {} From 17f52bf85ff87d724fb3e326125c06249f0c953e Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Tue, 14 May 2019 22:36:16 -0700 Subject: [PATCH 09/26] Start making + differentiable --- stdlib/public/core/SIMDVector.swift | 35 +++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index 56d5630036ebc..e7b7e4eda048b 100644 --- 
a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -781,6 +781,7 @@ extension SIMD where Scalar: FixedWidthInteger { // be replaced with @_semantics to lower directly to vector IR nodes. extension SIMD where Scalar: FloatingPoint { @_transparent + @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, Self.CotangentVector == Self) public static func +(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] + rhs[i] } @@ -1162,6 +1163,11 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + @differentiable(vjp: _vjpAdd(lhs:rhs:) + where Scalar: Differentiable & FixedWidthInteger, + Self: Differentiable, + Self.CotangentVector == Self, + Scalar.CotangentVector == Scalar) public static func +(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) + rhs } @@ -1392,3 +1398,32 @@ where T: SIMD, T.Scalar: FloatingPoint { } return result } + +extension SIMD + where Self: Differentiable, + Scalar : FloatingPoint, + CotangentVector == Self { + @inlinable + static func _vjpAdd( + lhs: Self, rhs: Self + ) -> (Self, (Self) -> (Self, Self)) { + return (lhs + rhs, { v in + return (v, v) + }) + } +} + +extension SIMD + where Self: Differentiable, + Scalar : FloatingPoint & Differentiable & FixedWidthInteger, + Self.CotangentVector == Self, + Scalar.CotangentVector == Scalar { + @inlinable + static func _vjpAdd( + lhs: Scalar, rhs: Self + ) -> (Self, (Self) -> (Scalar, Self)) { + return (lhs + rhs, { v in + return (v.wrappedSum(), v) + }) + } +} From 031f38472774cfedc2abe6fb75385334677f138c Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Wed, 15 May 2019 09:49:08 -0700 Subject: [PATCH 10/26] Get + and - differentiable. 
--- stdlib/public/core/SIMDVector.swift | 86 +++++++++++++++++++++++++++-- 1 file changed, 81 insertions(+), 5 deletions(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index e7b7e4eda048b..151d24f14b5af 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -781,7 +781,8 @@ extension SIMD where Scalar: FixedWidthInteger { // be replaced with @_semantics to lower directly to vector IR nodes. extension SIMD where Scalar: FloatingPoint { @_transparent - @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, Self.CotangentVector == Self) + @differentiable(vjp: _vjpAdd(lhs:rhs:) + where Self: Differentiable, Self.CotangentVector == Self) public static func +(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] + rhs[i] } @@ -789,6 +790,8 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + @differentiable(vjp: _vjpSubtract(lhs:rhs:) + where Self: Differentiable, Self.CotangentVector == Self) public static func -(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] - rhs[i] } @@ -844,6 +847,11 @@ extension SIMD where Scalar: FloatingPoint { /// Returns the sum of the scalars in the vector. @_alwaysEmitIntoClient + @differentiable(vjp: _vjpSum + where Self: Differentiable, + Scalar: Differentiable, + Scalar.CotangentVector == Scalar, + Self.CotangentVector == Self) public func sum() -> Scalar { // Implementation note: this eventually be defined to lower to either // llvm.experimental.vector.reduce.fadd or an explicit tree-sum. Open- @@ -1157,14 +1165,14 @@ extension SIMD where Scalar: FixedWidthInteger { extension SIMD where Scalar: FloatingPoint { - @_transparent + @_transparent // ???? 
public static prefix func -(rhs: Self) -> Self { return 0 - rhs } @_transparent @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Scalar: Differentiable & FixedWidthInteger, + where Scalar: Differentiable, Self: Differentiable, Self.CotangentVector == Self, Scalar.CotangentVector == Scalar) @@ -1173,6 +1181,11 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + @differentiable(vjp: _vjpAdd(lhs:rhs:) + where Scalar: Differentiable, + Self: Differentiable, + Self.CotangentVector == Self, + Scalar.CotangentVector == Scalar) public static func -(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) - rhs } @@ -1188,11 +1201,21 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + @differentiable(vjp: _vjpAdd(lhs:rhs:) + where Scalar: Differentiable, + Self: Differentiable, + Self.CotangentVector == Self, + Scalar.CotangentVector == Scalar) public static func +(lhs: Self, rhs: Scalar) -> Self { return lhs + Self(repeating: rhs) } @_transparent + @differentiable(vjp: _vjpSubtract(lhs:rhs:) + where Scalar: Differentiable, + Self: Differentiable, + Self.CotangentVector == Self, + Scalar.CotangentVector == Scalar) public static func -(lhs: Self, rhs: Scalar) -> Self { return lhs - Self(repeating: rhs) } @@ -1399,6 +1422,13 @@ where T: SIMD, T.Scalar: FloatingPoint { return result } +//public protocol SIMDDifferentiable : Differentiable +// where Scalar : FloatingPoint, +// Self == Self.TangentVector, +// Self == Self.CotangentVector, +// Self == Self.AllDifferentiableVariables {} + + extension SIMD where Self: Differentiable, Scalar : FloatingPoint, @@ -1411,11 +1441,20 @@ extension SIMD return (v, v) }) } + + @inlinable + static func _vjpSubtract( + lhs: Self, rhs: Self + ) -> (Self, (Self) -> (Self, Self)) { + return (lhs - rhs, { v in + return (v, -v) + }) + } } extension SIMD where Self: Differentiable, - Scalar : FloatingPoint & Differentiable & FixedWidthInteger, + Scalar : FloatingPoint & Differentiable, Self.CotangentVector == Self, 
Scalar.CotangentVector == Scalar { @inlinable @@ -1423,7 +1462,44 @@ extension SIMD lhs: Scalar, rhs: Self ) -> (Self, (Self) -> (Scalar, Self)) { return (lhs + rhs, { v in - return (v.wrappedSum(), v) + return (v.sum(), v) + }) + } + + @inlinable + static func _vjpSubtract( + lhs: Scalar, rhs: Self + ) -> (Self, (Self) -> (Scalar, Self)) { + return (lhs + rhs, { v in + return (v.sum(), -v) + }) + } + + @inlinable + static func _vjpAdd( + lhs: Self, rhs: Scalar + ) -> (Self, (Self) -> (Self, Scalar)) { + return (lhs + rhs, { v in + return (v, v.sum()) + }) + } + + @inlinable + static func _vjpSubtract( + lhs: Self, rhs: Scalar + ) -> (Self, (Self) -> (Self, Scalar)) { + return (lhs + rhs, { v in + return (v, -v.sum()) }) } } + +extension SIMD + where Self: Differentiable, + Scalar: Differentiable & FloatingPoint, + Scalar.CotangentVector == Scalar, + Self.CotangentVector == Self { + public static func _vjpSum() -> (Scalar, (Scalar) -> Self) { + return (self.sum(), { v in SIMD(repeating: v) }) + } +} From 83e7f753a9feaa1d5d8bb702473803610740b019 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Wed, 15 May 2019 11:04:20 -0700 Subject: [PATCH 11/26] WIP --- stdlib/public/core/SIMDVector.swift | 80 +++++++++++++++-------------- 1 file changed, 41 insertions(+), 39 deletions(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index 151d24f14b5af..56c7f7986ccc9 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -213,6 +213,9 @@ extension SIMD { /// in this vector. Because of this, the index is always in-range and no trap /// can occur. 
@_alwaysEmitIntoClient +// @differentiable(vjp: _vjpSubscript(index:) +// where Self : SIMDDifferentiable, +// Scalar : SIMDDifferentiable) public subscript(index: SIMD2) -> SIMD2 where Index: FixedWidthInteger { var result = SIMD2() @@ -782,7 +785,7 @@ extension SIMD where Scalar: FixedWidthInteger { extension SIMD where Scalar: FloatingPoint { @_transparent @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Self: Differentiable, Self.CotangentVector == Self) + where Self : SIMDDifferentiable) public static func +(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] + rhs[i] } @@ -791,7 +794,7 @@ extension SIMD where Scalar: FloatingPoint { @_transparent @differentiable(vjp: _vjpSubtract(lhs:rhs:) - where Self: Differentiable, Self.CotangentVector == Self) + where Self : SIMDDifferentiable) public static func -(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] - rhs[i] } @@ -848,10 +851,8 @@ extension SIMD where Scalar: FloatingPoint { /// Returns the sum of the scalars in the vector. @_alwaysEmitIntoClient @differentiable(vjp: _vjpSum - where Self: Differentiable, - Scalar: Differentiable, - Scalar.CotangentVector == Scalar, - Self.CotangentVector == Self) + where Self : SIMDDifferentiable, + Scalar : SIMDDifferentiable) public func sum() -> Scalar { // Implementation note: this eventually be defined to lower to either // llvm.experimental.vector.reduce.fadd or an explicit tree-sum. 
Open- @@ -1172,20 +1173,16 @@ extension SIMD where Scalar: FloatingPoint { @_transparent @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Scalar: Differentiable, - Self: Differentiable, - Self.CotangentVector == Self, - Scalar.CotangentVector == Scalar) + where Scalar : SIMDDifferentiable, + Self : SIMDDifferentiable) public static func +(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) + rhs } @_transparent @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Scalar: Differentiable, - Self: Differentiable, - Self.CotangentVector == Self, - Scalar.CotangentVector == Scalar) + where Scalar : SIMDDifferentiable, + Self : SIMDDifferentiable) public static func -(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) - rhs } @@ -1202,20 +1199,16 @@ extension SIMD where Scalar: FloatingPoint { @_transparent @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Scalar: Differentiable, - Self: Differentiable, - Self.CotangentVector == Self, - Scalar.CotangentVector == Scalar) + where Scalar : SIMDDifferentiable, + Self : SIMDDifferentiable) public static func +(lhs: Self, rhs: Scalar) -> Self { return lhs + Self(repeating: rhs) } @_transparent @differentiable(vjp: _vjpSubtract(lhs:rhs:) - where Scalar: Differentiable, - Self: Differentiable, - Self.CotangentVector == Self, - Scalar.CotangentVector == Scalar) + where Scalar : SIMDDifferentiable, + Self : SIMDDifferentiable) public static func -(lhs: Self, rhs: Scalar) -> Self { return lhs - Self(repeating: rhs) } @@ -1422,17 +1415,14 @@ where T: SIMD, T.Scalar: FloatingPoint { return result } -//public protocol SIMDDifferentiable : Differentiable -// where Scalar : FloatingPoint, -// Self == Self.TangentVector, -// Self == Self.CotangentVector, -// Self == Self.AllDifferentiableVariables {} - +public protocol SIMDDifferentiable : Differentiable + where Self == Self.TangentVector, + Self == Self.CotangentVector, + Self == Self.AllDifferentiableVariables {} extension SIMD - where Self: Differentiable, - Scalar : 
FloatingPoint, - CotangentVector == Self { + where Self : SIMDDifferentiable, + Scalar : FloatingPoint { @inlinable static func _vjpAdd( lhs: Self, rhs: Self @@ -1453,10 +1443,8 @@ extension SIMD } extension SIMD - where Self: Differentiable, - Scalar : FloatingPoint & Differentiable, - Self.CotangentVector == Self, - Scalar.CotangentVector == Scalar { + where Self : SIMDDifferentiable, + Scalar : FloatingPoint & SIMDDifferentiable { @inlinable static func _vjpAdd( lhs: Scalar, rhs: Self @@ -1494,12 +1482,26 @@ extension SIMD } } + extension SIMD - where Self: Differentiable, - Scalar: Differentiable & FloatingPoint, - Scalar.CotangentVector == Scalar, - Self.CotangentVector == Self { + where Self: SIMDDifferentiable, + Scalar: SIMDDifferentiable & FloatingPoint { public static func _vjpSum() -> (Scalar, (Scalar) -> Self) { return (self.sum(), { v in SIMD(repeating: v) }) } } + +//extension SIMD +// where Self : SIMDDifferentiable, +// Scalar : SIMDDifferentiable & SIMDScalar { +// public func _vjpSubscript(index: SIMD2) -> +// (SIMD2, (SIMD2) -> (CotangentVector, SIMD2)) where Index: FixedWidthInteger & SIMDScalar +// { +// func pullback(_ v: SIMD2) -> (CotangentVector, SIMD2) { +// var gradientOut = SIMD(repeating: 0) +// gradientOut[index] = gradientIn +// return (CotangentVector(gradientOut), index) +// } +// return (self[index], pullback) +// } +//} From 0268fc6c2ceccff8df11e3ba8d4c6bb58febe338 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Wed, 15 May 2019 22:13:33 -0700 Subject: [PATCH 12/26] Make all necessary operators differentiable. 
--- stdlib/public/core/SIMDVector.swift | 227 ++++++++++++++----- stdlib/public/core/SIMDVectorTypes.swift.gyb | 21 +- 2 files changed, 186 insertions(+), 62 deletions(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index 56c7f7986ccc9..b6edfeeeeda0c 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -213,9 +213,6 @@ extension SIMD { /// in this vector. Because of this, the index is always in-range and no trap /// can occur. @_alwaysEmitIntoClient -// @differentiable(vjp: _vjpSubscript(index:) -// where Self : SIMDDifferentiable, -// Scalar : SIMDDifferentiable) public subscript(index: SIMD2) -> SIMD2 where Index: FixedWidthInteger { var result = SIMD2() @@ -785,7 +782,10 @@ extension SIMD where Scalar: FixedWidthInteger { extension SIMD where Scalar: FloatingPoint { @_transparent @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Self : SIMDDifferentiable) + where Self : Differentiable, + Self.CotangentVector : SIMD, + Scalar : BinaryFloatingPoint, + Self.CotangentVector.Scalar: BinaryFloatingPoint) public static func +(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] + rhs[i] } @@ -793,8 +793,11 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent - @differentiable(vjp: _vjpSubtract(lhs:rhs:) - where Self : SIMDDifferentiable) + @differentiable(vjp: _vjpAdd(lhs:rhs:) + where Self: Differentiable, + Self.CotangentVector: SIMD, + Scalar : BinaryFloatingPoint, + Self.CotangentVector.Scalar: BinaryFloatingPoint) public static func -(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] - rhs[i] } @@ -802,6 +805,11 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + @differentiable(vjp: _vjpMultiply(lhs:rhs:) + where Self: Differentiable, + Self.CotangentVector: SIMD, + Scalar : BinaryFloatingPoint, + Self.CotangentVector == Self) public static func *(lhs: Self, rhs: Self) 
-> Self { var result = Self() for i in result.indices { result[i] = lhs[i] * rhs[i] } @@ -851,8 +859,11 @@ extension SIMD where Scalar: FloatingPoint { /// Returns the sum of the scalars in the vector. @_alwaysEmitIntoClient @differentiable(vjp: _vjpSum - where Self : SIMDDifferentiable, - Scalar : SIMDDifferentiable) + where Self : Differentiable, + Self.CotangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Scalar.CotangentVector : BinaryFloatingPoint, + Self.CotangentVector == Self) public func sum() -> Scalar { // Implementation note: this eventually be defined to lower to either // llvm.experimental.vector.reduce.fadd or an explicit tree-sum. Open- @@ -1166,59 +1177,95 @@ extension SIMD where Scalar: FixedWidthInteger { extension SIMD where Scalar: FloatingPoint { - @_transparent // ???? + @_transparent public static prefix func -(rhs: Self) -> Self { return 0 - rhs } @_transparent @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Scalar : SIMDDifferentiable, - Self : SIMDDifferentiable) + where Self: Differentiable, + Self.CotangentVector: SIMD, + Scalar : Differentiable & BinaryFloatingPoint, + Scalar.CotangentVector: BinaryFloatingPoint, + Self.CotangentVector.Scalar == Scalar.CotangentVector) public static func +(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) + rhs } @_transparent @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Scalar : SIMDDifferentiable, - Self : SIMDDifferentiable) + where Self: Differentiable, + Self.CotangentVector: SIMD, + Scalar : Differentiable & BinaryFloatingPoint, + Scalar.CotangentVector: BinaryFloatingPoint, + Self.CotangentVector.Scalar == Scalar.CotangentVector) public static func -(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) - rhs } @_transparent + @differentiable(vjp: _vjpMultiply(lhs:rhs:) + where Self : Differentiable, + Self.CotangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Self.CotangentVector == Self, + Scalar.CotangentVector == Scalar) public 
static func *(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) * rhs } @_transparent + @differentiable(vjp: _vjpDivide(lhs:rhs:) + where Self : Differentiable, + Self.CotangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Self.CotangentVector == Self, + Scalar.CotangentVector == Scalar) public static func /(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) / rhs } @_transparent @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Scalar : SIMDDifferentiable, - Self : SIMDDifferentiable) + where Self: Differentiable, + Self.CotangentVector: SIMD, + Scalar : Differentiable & BinaryFloatingPoint, + Scalar.CotangentVector: BinaryFloatingPoint, + Self.CotangentVector.Scalar == Scalar.CotangentVector) public static func +(lhs: Self, rhs: Scalar) -> Self { return lhs + Self(repeating: rhs) } @_transparent - @differentiable(vjp: _vjpSubtract(lhs:rhs:) - where Scalar : SIMDDifferentiable, - Self : SIMDDifferentiable) + @differentiable(vjp: _vjpAdd(lhs:rhs:) + where Self: Differentiable, + Self.CotangentVector: SIMD, + Scalar : Differentiable & BinaryFloatingPoint, + Scalar.CotangentVector: BinaryFloatingPoint, + Self.CotangentVector.Scalar == Scalar.CotangentVector) public static func -(lhs: Self, rhs: Scalar) -> Self { return lhs - Self(repeating: rhs) } @_transparent + @differentiable(vjp: _vjpMultiply(lhs:rhs:) + where Self : Differentiable, + Self.CotangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Self.CotangentVector == Self, + Scalar.CotangentVector == Scalar) public static func *(lhs: Self, rhs: Scalar) -> Self { return lhs * Self(repeating: rhs) } @_transparent + @differentiable(vjp: _vjpDivide(lhs:rhs:) + where Self : Differentiable, + Self.CotangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Self.CotangentVector == Self, + Scalar.CotangentVector == Scalar) public static func /(lhs: Self, rhs: Scalar) -> Self { return lhs / Self(repeating: rhs) } @@ -1415,18 +1462,18 @@ where T: 
SIMD, T.Scalar: FloatingPoint { return result } -public protocol SIMDDifferentiable : Differentiable - where Self == Self.TangentVector, - Self == Self.CotangentVector, - Self == Self.AllDifferentiableVariables {} - extension SIMD - where Self : SIMDDifferentiable, - Scalar : FloatingPoint { + where Self: Differentiable, + CotangentVector: SIMD, + Scalar : BinaryFloatingPoint, + /* Required in order to use unary negation operator due to following error: + >Self.CotangentVector.Scalar' does not conform to protocol 'FloatingPoint' + */ + CotangentVector.Scalar: BinaryFloatingPoint { @inlinable static func _vjpAdd( lhs: Self, rhs: Self - ) -> (Self, (Self) -> (Self, Self)) { + ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { return (lhs + rhs, { v in return (v, v) }) @@ -1435,73 +1482,133 @@ extension SIMD @inlinable static func _vjpSubtract( lhs: Self, rhs: Self - ) -> (Self, (Self) -> (Self, Self)) { - return (lhs - rhs, { v in + ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { + return (lhs - rhs, { (v: CotangentVector) in return (v, -v) }) } } extension SIMD - where Self : SIMDDifferentiable, - Scalar : FloatingPoint & SIMDDifferentiable { + where Self: Differentiable, + CotangentVector: SIMD, + // error: generic parameter 'Self' could not be inferred: return (lhs * rhs,... 
+ Scalar : BinaryFloatingPoint, + // binary operator '*' cannot be applied to operands of type 'Self.CotangentVector' and 'Self' + Self.CotangentVector == Self { + @inlinable + static func _vjpMultiply( + lhs: Self, rhs: Self + ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { + return (lhs * rhs, { (v: CotangentVector) in + return (v * rhs, v * lhs) + }) + } + + @inlinable + static func _vjpDivide( + lhs: Self, rhs: Self + ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { + return (lhs / rhs, { (v: CotangentVector) in + (v / rhs, -lhs / (rhs * rhs) * v) + }) + } +} + +extension SIMD + where Self : Differentiable, + CotangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Scalar.CotangentVector: BinaryFloatingPoint, + CotangentVector.Scalar == Scalar.CotangentVector { @inlinable static func _vjpAdd( lhs: Scalar, rhs: Self - ) -> (Self, (Self) -> (Scalar, Self)) { - return (lhs + rhs, { v in + ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + return (lhs + rhs, { (v: CotangentVector) in return (v.sum(), v) }) } - + @inlinable static func _vjpSubtract( lhs: Scalar, rhs: Self - ) -> (Self, (Self) -> (Scalar, Self)) { - return (lhs + rhs, { v in + ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + return (lhs + rhs, { (v: CotangentVector) in return (v.sum(), -v) }) } - + @inlinable static func _vjpAdd( lhs: Self, rhs: Scalar - ) -> (Self, (Self) -> (Self, Scalar)) { - return (lhs + rhs, { v in + ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + return (lhs + rhs, { (v: CotangentVector) in return (v, v.sum()) }) } - + @inlinable static func _vjpSubtract( lhs: Self, rhs: Scalar - ) -> (Self, (Self) -> (Self, Scalar)) { - return (lhs + rhs, { v in + ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + return (lhs + rhs, { (v: CotangentVector) in return (v, -v.sum()) }) } } - extension SIMD - where 
Self: SIMDDifferentiable, - Scalar: SIMDDifferentiable & FloatingPoint { - public static func _vjpSum() -> (Scalar, (Scalar) -> Self) { - return (self.sum(), { v in SIMD(repeating: v) }) + where Self : Differentiable, + CotangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Self.CotangentVector == Self, + Scalar.CotangentVector == Scalar { + @inlinable + static func _vjpMultiply( + lhs: Self, rhs: Scalar + ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + return (lhs * rhs, { (v: CotangentVector) in + return (v * rhs, (v * lhs).sum()) + }) + } + + @inlinable + static func _vjpDivide( + lhs: Self, rhs: Scalar + ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + return (lhs / rhs, { (v: CotangentVector) in + (-lhs / (rhs * rhs) * v, (v / rhs).sum()) + }) + } + + @inlinable + static func _vjpMultiply( + lhs: Scalar, rhs: Self + ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + return (lhs * rhs, { (v: CotangentVector) in + return ((v * lhs).sum(), v * rhs) + }) + } + + @inlinable + static func _vjpDivide( + lhs: Scalar, rhs: Self + ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + return (lhs / rhs, { (v: CotangentVector) in + ((v / rhs).sum(), -lhs / (rhs * rhs) * v) + }) } } -//extension SIMD -// where Self : SIMDDifferentiable, -// Scalar : SIMDDifferentiable & SIMDScalar { -// public func _vjpSubscript(index: SIMD2) -> -// (SIMD2, (SIMD2) -> (CotangentVector, SIMD2)) where Index: FixedWidthInteger & SIMDScalar -// { -// func pullback(_ v: SIMD2) -> (CotangentVector, SIMD2) { -// var gradientOut = SIMD(repeating: 0) -// gradientOut[index] = gradientIn -// return (CotangentVector(gradientOut), index) -// } -// return (self[index], pullback) -// } -//} +extension SIMD + where Self : Differentiable, + CotangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Scalar.CotangentVector : BinaryFloatingPoint, + 
CotangentVector == Self { + @usableFromInline + func _vjpSum() -> (Scalar, (Scalar.CotangentVector) -> CotangentVector) { + return (sum(), { (v: Scalar.CotangentVector) in Self.init(repeating: Scalar(v)) }) + } +} diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index 6f114419228fa..3c4803b9221bf 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -44,7 +44,9 @@ public struct SIMD${n}: SIMD where Scalar: SIMDScalar { _storage = Scalar.SIMD${storageN}Storage() } - /// Accesses the scalar at the specified position. + @differentiable(vjp: _vjpSubscript + where Scalar : Differentiable & BinaryFloatingPoint, + Scalar.CotangentVector : BinaryFloatingPoint) public subscript(index: Int) -> Scalar { @_transparent get { _precondition(indices.contains(index)) @@ -189,7 +191,7 @@ extension SIMD${n} : AdditiveArithmetic where Scalar : FloatingPoint {} extension SIMD${n} : VectorNumeric where Scalar : FloatingPoint {} -extension SIMD${n} : Differentiable where Scalar : Differentiable & FloatingPoint { +extension SIMD${n} : Differentiable where Scalar : Differentiable & BinaryFloatingPoint { public typealias TangentVector = SIMD${n} public typealias CotangentVector = SIMD${n} public typealias AllDifferentiableVariables = SIMD${n} @@ -198,6 +200,21 @@ extension SIMD${n} : Differentiable where Scalar : Differentiable & FloatingPoin } } +extension SIMD${n} + where Scalar : Differentiable & BinaryFloatingPoint, + Scalar.CotangentVector : BinaryFloatingPoint { + public func _vjpSubscript(index: Int) -> + (Scalar, (Scalar.CotangentVector) -> CotangentVector) + { + return (self[index], { (v: Scalar.CotangentVector) in + var zeros = Self.zero + zeros[index] = Scalar(v) + return zeros + } + ) + } +} + %end extension SIMD3 { From 72d13e5fd56f18e21126d11d2f2296df61f1b23e Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Thu, 16 May 2019 10:38:27 -0700 Subject: [PATCH 13/26] 
Address 1st batch of PR comments. --- stdlib/public/core/SIMDVector.swift | 60 ++++++++++++-------- stdlib/public/core/SIMDVectorTypes.swift.gyb | 10 ++-- 2 files changed, 39 insertions(+), 31 deletions(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index b6edfeeeeda0c..9f03f3532fcac 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -781,6 +781,7 @@ extension SIMD where Scalar: FixedWidthInteger { // be replaced with @_semantics to lower directly to vector IR nodes. extension SIMD where Scalar: FloatingPoint { @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self : Differentiable, Self.CotangentVector : SIMD, @@ -793,6 +794,7 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, Self.CotangentVector: SIMD, @@ -805,6 +807,7 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpMultiply(lhs:rhs:) where Self: Differentiable, Self.CotangentVector: SIMD, @@ -858,6 +861,7 @@ extension SIMD where Scalar: FloatingPoint { /// Returns the sum of the scalars in the vector. 
@_alwaysEmitIntoClient + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpSum where Self : Differentiable, Self.CotangentVector : SIMD, @@ -1183,6 +1187,7 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, Self.CotangentVector: SIMD, @@ -1194,6 +1199,7 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, Self.CotangentVector: SIMD, @@ -1205,6 +1211,7 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpMultiply(lhs:rhs:) where Self : Differentiable, Self.CotangentVector : SIMD, @@ -1216,6 +1223,7 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpDivide(lhs:rhs:) where Self : Differentiable, Self.CotangentVector : SIMD, @@ -1227,6 +1235,7 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, Self.CotangentVector: SIMD, @@ -1238,6 +1247,7 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, Self.CotangentVector: SIMD, @@ -1249,6 +1259,7 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpMultiply(lhs:rhs:) where Self : Differentiable, Self.CotangentVector : SIMD, @@ -1260,6 +1271,7 @@ extension SIMD where Scalar: FloatingPoint { } @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpDivide(lhs:rhs:) where Self : Differentiable, Self.CotangentVector : SIMD, @@ -1473,7 +1485,7 @@ extension SIMD @inlinable static func _vjpAdd( lhs: Self, rhs: Self - ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { + ) -> (Self, 
(CotangentVector) -> (CotangentVector, CotangentVector)) { return (lhs + rhs, { v in return (v, v) }) @@ -1482,7 +1494,7 @@ extension SIMD @inlinable static func _vjpSubtract( lhs: Self, rhs: Self - ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { + ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { return (lhs - rhs, { (v: CotangentVector) in return (v, -v) }) @@ -1492,15 +1504,13 @@ extension SIMD extension SIMD where Self: Differentiable, CotangentVector: SIMD, - // error: generic parameter 'Self' could not be inferred: return (lhs * rhs,... Scalar : BinaryFloatingPoint, - // binary operator '*' cannot be applied to operands of type 'Self.CotangentVector' and 'Self' Self.CotangentVector == Self { @inlinable static func _vjpMultiply( lhs: Self, rhs: Self - ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { - return (lhs * rhs, { (v: CotangentVector) in + ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { + return (lhs * rhs, { v in return (v * rhs, v * lhs) }) } @@ -1508,8 +1518,8 @@ extension SIMD @inlinable static func _vjpDivide( lhs: Self, rhs: Self - ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { - return (lhs / rhs, { (v: CotangentVector) in + ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { + return (lhs / rhs, { v in (v / rhs, -lhs / (rhs * rhs) * v) }) } @@ -1524,8 +1534,8 @@ extension SIMD @inlinable static func _vjpAdd( lhs: Scalar, rhs: Self - ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { - return (lhs + rhs, { (v: CotangentVector) in + ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + return (lhs + rhs, { v in return (v.sum(), v) }) } @@ -1533,8 +1543,8 @@ extension SIMD @inlinable static func _vjpSubtract( lhs: Scalar, rhs: Self - ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { - return (lhs + rhs, { (v: CotangentVector) in + ) -> 
(Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + return (lhs + rhs, { v in return (v.sum(), -v) }) } @@ -1542,8 +1552,8 @@ extension SIMD @inlinable static func _vjpAdd( lhs: Self, rhs: Scalar - ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { - return (lhs + rhs, { (v: CotangentVector) in + ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + return (lhs + rhs, { v in return (v, v.sum()) }) } @@ -1551,8 +1561,8 @@ extension SIMD @inlinable static func _vjpSubtract( lhs: Self, rhs: Scalar - ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { - return (lhs + rhs, { (v: CotangentVector) in + ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + return (lhs + rhs, { v in return (v, -v.sum()) }) } @@ -1567,8 +1577,8 @@ extension SIMD @inlinable static func _vjpMultiply( lhs: Self, rhs: Scalar - ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { - return (lhs * rhs, { (v: CotangentVector) in + ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + return (lhs * rhs, { v in return (v * rhs, (v * lhs).sum()) }) } @@ -1576,8 +1586,8 @@ extension SIMD @inlinable static func _vjpDivide( lhs: Self, rhs: Scalar - ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { - return (lhs / rhs, { (v: CotangentVector) in + ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + return (lhs / rhs, { v in (-lhs / (rhs * rhs) * v, (v / rhs).sum()) }) } @@ -1585,8 +1595,8 @@ extension SIMD @inlinable static func _vjpMultiply( lhs: Scalar, rhs: Self - ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { - return (lhs * rhs, { (v: CotangentVector) in + ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + return (lhs * rhs, { v in return ((v * lhs).sum(), v * rhs) }) } @@ -1594,8 +1604,8 @@ extension SIMD 
@inlinable static func _vjpDivide( lhs: Scalar, rhs: Self - ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { - return (lhs / rhs, { (v: CotangentVector) in + ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + return (lhs / rhs, { v in ((v / rhs).sum(), -lhs / (rhs * rhs) * v) }) } @@ -1609,6 +1619,6 @@ extension SIMD CotangentVector == Self { @usableFromInline func _vjpSum() -> (Scalar, (Scalar.CotangentVector) -> CotangentVector) { - return (sum(), { (v: Scalar.CotangentVector) in Self.init(repeating: Scalar(v)) }) + return (sum(), { v in Self(repeating: Scalar(v)) }) } } diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index 3c4803b9221bf..c655b7bea6ada 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -203,15 +203,13 @@ extension SIMD${n} : Differentiable where Scalar : Differentiable & BinaryFloati extension SIMD${n} where Scalar : Differentiable & BinaryFloatingPoint, Scalar.CotangentVector : BinaryFloatingPoint { - public func _vjpSubscript(index: Int) -> - (Scalar, (Scalar.CotangentVector) -> CotangentVector) - { - return (self[index], { (v: Scalar.CotangentVector) in + public func _vjpSubscript(index: Int) + -> (Scalar, (Scalar.CotangentVector) -> CotangentVector) { + return (self[index], { v in var zeros = Self.zero zeros[index] = Scalar(v) return zeros - } - ) + }) } } From e057686ef8acf85302352fa707e6fa41a16f37fe Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Thu, 16 May 2019 11:58:15 -0700 Subject: [PATCH 14/26] Fix due to changes in PR #24825. 
--- stdlib/public/core/SIMDVector.swift | 123 +++++++++---------- stdlib/public/core/SIMDVectorTypes.swift.gyb | 9 +- 2 files changed, 64 insertions(+), 68 deletions(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index 9f03f3532fcac..018e9e0d23eb1 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -784,9 +784,9 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self : Differentiable, - Self.CotangentVector : SIMD, + Self.TangentVector : SIMD, Scalar : BinaryFloatingPoint, - Self.CotangentVector.Scalar: BinaryFloatingPoint) + Self.TangentVector.Scalar: BinaryFloatingPoint) public static func +(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] + rhs[i] } @@ -797,9 +797,9 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, - Self.CotangentVector: SIMD, + Self.TangentVector: SIMD, Scalar : BinaryFloatingPoint, - Self.CotangentVector.Scalar: BinaryFloatingPoint) + Self.TangentVector.Scalar: BinaryFloatingPoint) public static func -(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] - rhs[i] } @@ -810,9 +810,9 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpMultiply(lhs:rhs:) where Self: Differentiable, - Self.CotangentVector: SIMD, + Self.TangentVector: SIMD, Scalar : BinaryFloatingPoint, - Self.CotangentVector == Self) + Self.TangentVector == Self) public static func *(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] * rhs[i] } @@ -864,10 +864,10 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpSum where Self : Differentiable, - Self.CotangentVector : SIMD, + Self.TangentVector : SIMD, 
Scalar : BinaryFloatingPoint & Differentiable, - Scalar.CotangentVector : BinaryFloatingPoint, - Self.CotangentVector == Self) + Scalar.TangentVector : BinaryFloatingPoint, + Self.TangentVector == Self) public func sum() -> Scalar { // Implementation note: this eventually be defined to lower to either // llvm.experimental.vector.reduce.fadd or an explicit tree-sum. Open- @@ -1190,10 +1190,10 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, - Self.CotangentVector: SIMD, + Self.TangentVector: SIMD, Scalar : Differentiable & BinaryFloatingPoint, - Scalar.CotangentVector: BinaryFloatingPoint, - Self.CotangentVector.Scalar == Scalar.CotangentVector) + Scalar.TangentVector: BinaryFloatingPoint, + Self.TangentVector.Scalar == Scalar.TangentVector) public static func +(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) + rhs } @@ -1202,10 +1202,10 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, - Self.CotangentVector: SIMD, + Self.TangentVector: SIMD, Scalar : Differentiable & BinaryFloatingPoint, - Scalar.CotangentVector: BinaryFloatingPoint, - Self.CotangentVector.Scalar == Scalar.CotangentVector) + Scalar.TangentVector: BinaryFloatingPoint, + Self.TangentVector.Scalar == Scalar.TangentVector) public static func -(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) - rhs } @@ -1214,10 +1214,10 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpMultiply(lhs:rhs:) where Self : Differentiable, - Self.CotangentVector : SIMD, + Self.TangentVector : SIMD, Scalar : BinaryFloatingPoint & Differentiable, - Self.CotangentVector == Self, - Scalar.CotangentVector == Scalar) + Self.TangentVector == Self, + Scalar.TangentVector == Scalar) public static func *(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) * rhs } @@ 
-1226,10 +1226,10 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpDivide(lhs:rhs:) where Self : Differentiable, - Self.CotangentVector : SIMD, + Self.TangentVector : SIMD, Scalar : BinaryFloatingPoint & Differentiable, - Self.CotangentVector == Self, - Scalar.CotangentVector == Scalar) + Self.TangentVector == Self, + Scalar.TangentVector == Scalar) public static func /(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) / rhs } @@ -1238,10 +1238,10 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, - Self.CotangentVector: SIMD, + Self.TangentVector: SIMD, Scalar : Differentiable & BinaryFloatingPoint, - Scalar.CotangentVector: BinaryFloatingPoint, - Self.CotangentVector.Scalar == Scalar.CotangentVector) + Scalar.TangentVector: BinaryFloatingPoint, + Self.TangentVector.Scalar == Scalar.TangentVector) public static func +(lhs: Self, rhs: Scalar) -> Self { return lhs + Self(repeating: rhs) } @@ -1250,10 +1250,10 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self: Differentiable, - Self.CotangentVector: SIMD, + Self.TangentVector: SIMD, Scalar : Differentiable & BinaryFloatingPoint, - Scalar.CotangentVector: BinaryFloatingPoint, - Self.CotangentVector.Scalar == Scalar.CotangentVector) + Scalar.TangentVector: BinaryFloatingPoint, + Self.TangentVector.Scalar == Scalar.TangentVector) public static func -(lhs: Self, rhs: Scalar) -> Self { return lhs - Self(repeating: rhs) } @@ -1262,10 +1262,10 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpMultiply(lhs:rhs:) where Self : Differentiable, - Self.CotangentVector : SIMD, + Self.TangentVector : SIMD, Scalar : BinaryFloatingPoint & Differentiable, - Self.CotangentVector == Self, - Scalar.CotangentVector == Scalar) + Self.TangentVector == Self, + 
Scalar.TangentVector == Scalar) public static func *(lhs: Self, rhs: Scalar) -> Self { return lhs * Self(repeating: rhs) } @@ -1274,10 +1274,10 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpDivide(lhs:rhs:) where Self : Differentiable, - Self.CotangentVector : SIMD, + Self.TangentVector : SIMD, Scalar : BinaryFloatingPoint & Differentiable, - Self.CotangentVector == Self, - Scalar.CotangentVector == Scalar) + Self.TangentVector == Self, + Scalar.TangentVector == Scalar) public static func /(lhs: Self, rhs: Scalar) -> Self { return lhs / Self(repeating: rhs) } @@ -1476,16 +1476,13 @@ where T: SIMD, T.Scalar: FloatingPoint { extension SIMD where Self: Differentiable, - CotangentVector: SIMD, + TangentVector: SIMD, Scalar : BinaryFloatingPoint, - /* Required in order to use unary negation operator due to following error: - >Self.CotangentVector.Scalar' does not conform to protocol 'FloatingPoint' - */ - CotangentVector.Scalar: BinaryFloatingPoint { + TangentVector.Scalar: BinaryFloatingPoint { @inlinable static func _vjpAdd( lhs: Self, rhs: Self - ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { + ) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { return (lhs + rhs, { v in return (v, v) }) @@ -1494,8 +1491,8 @@ extension SIMD @inlinable static func _vjpSubtract( lhs: Self, rhs: Self - ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { - return (lhs - rhs, { (v: CotangentVector) in + ) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { + return (lhs - rhs, { (v: TangentVector) in return (v, -v) }) } @@ -1503,13 +1500,13 @@ extension SIMD extension SIMD where Self: Differentiable, - CotangentVector: SIMD, + TangentVector: SIMD, Scalar : BinaryFloatingPoint, - Self.CotangentVector == Self { + Self.TangentVector == Self { @inlinable static func _vjpMultiply( lhs: Self, rhs: Self - ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { + 
) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { return (lhs * rhs, { v in return (v * rhs, v * lhs) }) @@ -1518,7 +1515,7 @@ extension SIMD @inlinable static func _vjpDivide( lhs: Self, rhs: Self - ) -> (Self, (CotangentVector) -> (CotangentVector, CotangentVector)) { + ) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { return (lhs / rhs, { v in (v / rhs, -lhs / (rhs * rhs) * v) }) @@ -1527,14 +1524,14 @@ extension SIMD extension SIMD where Self : Differentiable, - CotangentVector : SIMD, + TangentVector : SIMD, Scalar : BinaryFloatingPoint & Differentiable, - Scalar.CotangentVector: BinaryFloatingPoint, - CotangentVector.Scalar == Scalar.CotangentVector { + Scalar.TangentVector: BinaryFloatingPoint, + TangentVector.Scalar == Scalar.TangentVector { @inlinable static func _vjpAdd( lhs: Scalar, rhs: Self - ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + ) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { return (lhs + rhs, { v in return (v.sum(), v) }) @@ -1543,7 +1540,7 @@ extension SIMD @inlinable static func _vjpSubtract( lhs: Scalar, rhs: Self - ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + ) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { return (lhs + rhs, { v in return (v.sum(), -v) }) @@ -1552,7 +1549,7 @@ extension SIMD @inlinable static func _vjpAdd( lhs: Self, rhs: Scalar - ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + ) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { return (lhs + rhs, { v in return (v, v.sum()) }) @@ -1561,7 +1558,7 @@ extension SIMD @inlinable static func _vjpSubtract( lhs: Self, rhs: Scalar - ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + ) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { return (lhs + rhs, { v in return (v, -v.sum()) }) @@ -1570,14 +1567,14 @@ extension SIMD extension SIMD 
where Self : Differentiable, - CotangentVector : SIMD, + TangentVector : SIMD, Scalar : BinaryFloatingPoint & Differentiable, - Self.CotangentVector == Self, - Scalar.CotangentVector == Scalar { + Self.TangentVector == Self, + Scalar.TangentVector == Scalar { @inlinable static func _vjpMultiply( lhs: Self, rhs: Scalar - ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + ) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { return (lhs * rhs, { v in return (v * rhs, (v * lhs).sum()) }) @@ -1586,7 +1583,7 @@ extension SIMD @inlinable static func _vjpDivide( lhs: Self, rhs: Scalar - ) -> (Self, (CotangentVector) -> (CotangentVector, Scalar.CotangentVector)) { + ) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { return (lhs / rhs, { v in (-lhs / (rhs * rhs) * v, (v / rhs).sum()) }) @@ -1595,7 +1592,7 @@ extension SIMD @inlinable static func _vjpMultiply( lhs: Scalar, rhs: Self - ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + ) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { return (lhs * rhs, { v in return ((v * lhs).sum(), v * rhs) }) @@ -1604,7 +1601,7 @@ extension SIMD @inlinable static func _vjpDivide( lhs: Scalar, rhs: Self - ) -> (Self, (CotangentVector) -> (Scalar.CotangentVector, CotangentVector)) { + ) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { return (lhs / rhs, { v in ((v / rhs).sum(), -lhs / (rhs * rhs) * v) }) @@ -1613,12 +1610,12 @@ extension SIMD extension SIMD where Self : Differentiable, - CotangentVector : SIMD, + TangentVector : SIMD, Scalar : BinaryFloatingPoint & Differentiable, - Scalar.CotangentVector : BinaryFloatingPoint, - CotangentVector == Self { + Scalar.TangentVector : BinaryFloatingPoint, + TangentVector == Self { @usableFromInline - func _vjpSum() -> (Scalar, (Scalar.CotangentVector) -> CotangentVector) { + func _vjpSum() -> (Scalar, (Scalar.TangentVector) -> TangentVector) { return (sum(), 
{ v in Self(repeating: Scalar(v)) }) } } diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index c655b7bea6ada..ea43db4f439e2 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -46,7 +46,7 @@ public struct SIMD${n}: SIMD where Scalar: SIMDScalar { @differentiable(vjp: _vjpSubscript where Scalar : Differentiable & BinaryFloatingPoint, - Scalar.CotangentVector : BinaryFloatingPoint) + Scalar.TangentVector : BinaryFloatingPoint) public subscript(index: Int) -> Scalar { @_transparent get { _precondition(indices.contains(index)) @@ -193,18 +193,17 @@ extension SIMD${n} : VectorNumeric where Scalar : FloatingPoint {} extension SIMD${n} : Differentiable where Scalar : Differentiable & BinaryFloatingPoint { public typealias TangentVector = SIMD${n} - public typealias CotangentVector = SIMD${n} public typealias AllDifferentiableVariables = SIMD${n} - public func tangentVector(from cotangent: CotangentVector) -> TangentVector { + public func tangentVector(from cotangent: TangentVector) -> TangentVector { return cotangent } } extension SIMD${n} where Scalar : Differentiable & BinaryFloatingPoint, - Scalar.CotangentVector : BinaryFloatingPoint { + Scalar.TangentVector : BinaryFloatingPoint { public func _vjpSubscript(index: Int) - -> (Scalar, (Scalar.CotangentVector) -> CotangentVector) { + -> (Scalar, (Scalar.TangentVector) -> TangentVector) { return (self[index], { v in var zeros = Self.zero zeros[index] = Scalar(v) From 260f2fb72e24154909a7d6f145422adf003b5a69 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Thu, 16 May 2019 12:04:19 -0700 Subject: [PATCH 15/26] Address 2nd batch of PR comments. 
--- stdlib/public/core/SIMDVector.swift | 1 + stdlib/public/core/SIMDVectorTypes.swift.gyb | 3 +++ 2 files changed, 4 insertions(+) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index 018e9e0d23eb1..a8f3e4ed78d99 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -1474,6 +1474,7 @@ where T: SIMD, T.Scalar: FloatingPoint { return result } +// SWIFT_ENABLE_TENSORFLOW extension SIMD where Self: Differentiable, TangentVector: SIMD, diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index ea43db4f439e2..a740edbd6750d 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -44,6 +44,8 @@ public struct SIMD${n}: SIMD where Scalar: SIMDScalar { _storage = Scalar.SIMD${storageN}Storage() } + /// Accesses the scalar at the specified position. + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpSubscript where Scalar : Differentiable & BinaryFloatingPoint, Scalar.TangentVector : BinaryFloatingPoint) @@ -187,6 +189,7 @@ extension SIMD${n} where Scalar: BinaryFloatingPoint { } } +// SWIFT_ENABLE_TENSORFLOW extension SIMD${n} : AdditiveArithmetic where Scalar : FloatingPoint {} extension SIMD${n} : VectorNumeric where Scalar : FloatingPoint {} From c4de3c9cec3c4f005b1dbecf65cf4bd8d6615b10 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Thu, 16 May 2019 12:10:48 -0700 Subject: [PATCH 16/26] Make _vjpSubscript internal and inline. 
--- stdlib/public/core/SIMDVectorTypes.swift.gyb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index a740edbd6750d..90821a2786840 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -205,7 +205,8 @@ extension SIMD${n} : Differentiable where Scalar : Differentiable & BinaryFloati extension SIMD${n} where Scalar : Differentiable & BinaryFloatingPoint, Scalar.TangentVector : BinaryFloatingPoint { - public func _vjpSubscript(index: Int) + @usableFromInline + internal func _vjpSubscript(index: Int) -> (Scalar, (Scalar.TangentVector) -> TangentVector) { return (self[index], { v in var zeros = Self.zero From e4e96dd387232eec534cb3a460186ae3abaa4ea9 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Thu, 16 May 2019 14:47:47 -0700 Subject: [PATCH 17/26] WIP: test for + doesn't seem to work, filed JIRA bug. --- stdlib/public/core/SIMDVector.swift | 10 +- stdlib/public/core/SIMDVectorTypes.swift.gyb | 4 +- test/AutoDiff/SIMD.swift | 328 +++++++++++++++++++ 3 files changed, 336 insertions(+), 6 deletions(-) create mode 100644 test/AutoDiff/SIMD.swift diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index a8f3e4ed78d99..14bc021d93665 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -786,7 +786,7 @@ extension SIMD where Scalar: FloatingPoint { where Self : Differentiable, Self.TangentVector : SIMD, Scalar : BinaryFloatingPoint, - Self.TangentVector.Scalar: BinaryFloatingPoint) + Self.TangentVector.Scalar : BinaryFloatingPoint) public static func +(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] + rhs[i] } @@ -795,11 +795,11 @@ extension SIMD where Scalar: FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW - @differentiable(vjp: _vjpAdd(lhs:rhs:) + @differentiable(vjp: 
_vjpSubtract(lhs:rhs:) where Self: Differentiable, Self.TangentVector: SIMD, Scalar : BinaryFloatingPoint, - Self.TangentVector.Scalar: BinaryFloatingPoint) + Self.TangentVector.Scalar : BinaryFloatingPoint) public static func -(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] - rhs[i] } @@ -809,8 +809,8 @@ extension SIMD where Scalar: FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpMultiply(lhs:rhs:) - where Self: Differentiable, - Self.TangentVector: SIMD, + where Self : Differentiable, + Self.TangentVector : SIMD, Scalar : BinaryFloatingPoint, Self.TangentVector == Self) public static func *(lhs: Self, rhs: Self) -> Self { diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index 90821a2786840..ca70c1421e67b 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -194,7 +194,9 @@ extension SIMD${n} : AdditiveArithmetic where Scalar : FloatingPoint {} extension SIMD${n} : VectorNumeric where Scalar : FloatingPoint {} -extension SIMD${n} : Differentiable where Scalar : Differentiable & BinaryFloatingPoint { +extension SIMD${n} : Differentiable + where Scalar : Differentiable & BinaryFloatingPoint, + Scalar.TangentVector: BinaryFloatingPoint { public typealias TangentVector = SIMD${n} public typealias AllDifferentiableVariables = SIMD${n} public func tangentVector(from cotangent: TangentVector) -> TangentVector { diff --git a/test/AutoDiff/SIMD.swift b/test/AutoDiff/SIMD.swift new file mode 100644 index 0000000000000..006571fdca7b5 --- /dev/null +++ b/test/AutoDiff/SIMD.swift @@ -0,0 +1,328 @@ +// RUN: %target-run-simple-swift +// REQUIRES: executable_test + +import StdlibUnittest +#if os(macOS) +import Darwin.C +#else +import Glibc +#endif + +var SIMDTests = TestSuite("SIMD") + +typealias FloatArrayGrad = SIMD4.CotangentVector + +SIMDTests.test("Addition") { + let a = 
SIMD4(1,2,3,4) + // let aGrad = FloatArrayGrad(1,2,3,4) + + // let foo = { (x: SIMD4) -> SIMD4 in + // return x + // } + // let backprop = pullback(at: a, in: foo) + + let foo1 = { (x: SIMD4, y: SIMD4) -> SIMD4 in + return x + y + } + let backprop1 = pullback(at: a, a, in: foo1) + // expectEqual((aGrad, aGrad), backprop(FloatArrayGrad(1,2,3,4))) + + // let foo1 = { (x: SIMD4, y: SIMD4) -> SIMD4 in + // return x + y + // } + // expectEqual(SIMD4(arrayLiteral: 1, 2, 3, 4), gradient(at: a, a, in: foo1)) +// let foo2 = { (x: SIMD4, y: Float) -> Float in +// return -x * y +// } +// expectEqual((-4, -3), gradient(at: 3, 4, in: foo2)) +// let foo3 = { (x: Float, y: SIMD4) -> Float in +// return -x + y +// } +// expectEqual((-1, 1), gradient(at: 3, 4, in: foo3)) +} +/* +SIMDTests.test("Fanout") { + let foo1 = { (x: Float) -> Float in + x - x + } + expectEqual(0, gradient(at: 100, in: foo1)) + let foo2 = { (x: Float) -> Float in + x + x + } + expectEqual(2, gradient(at: 100, in: foo2)) + let foo3 = { (x: Float, y: Float) -> Float in + x + x + x * y + } + expectEqual((4, 3), gradient(at: 3, 2, in: foo3)) +} + +SIMDTests.test("FunctionCall") { + func foo(_ x: Float, _ y: Float) -> Float { + return 3 * x + { $0 * 3 }(3) * y + } + expectEqual((3, 9), gradient(at: 3, 4, in: foo)) + expectEqual(3, gradient(at: 3) { x in foo(x, 4) }) +} + +SIMDTests.test("ResultSelection") { + func foo(_ x: Float, _ y: Float) -> (Float, Float) { + return (x + 1, y + 2) + } + expectEqual((1, 0), gradient(at: 3, 3, in: { x, y in foo(x, y).0 })) + expectEqual((0, 1), gradient(at: 3, 3, in: { x, y in foo(x, y).1 })) +} + +SIMDTests.test("CaptureLocal") { + let z: Float = 10 + func foo(_ x: Float) -> Float { + return z * x + } + expectEqual(10, gradient(at: 0, in: foo)) +} + +var globalVar: Float = 10 +SIMDTests.test("CaptureGlobal") { + let foo: (Float) -> Float = { x in + globalVar += 20 + return globalVar * x + } + expectEqual(30, gradient(at: 0, in: foo)) +} + +let foo: (Float) -> Float = { x in + 
return x * x +} +SIMDTests.test("GlobalLet") { + expectEqual(2, gradient(at: 1, in: foo)) +} + +var foo_diffable: @differentiable (Float) -> (Float) + = differentiableFunction { x in (x * x, { v in 2 * x * v }) } +SIMDTests.test("GlobalDiffableFunc") { + expectEqual(2, gradient(at: 1, in: foo_diffable)) + expectEqual(2, gradient(at: 1, in: { x in foo_diffable(x) })) + expectEqual(1, gradient(at: 1, in: { (x: Float) -> Float in + foo_diffable = { x in x + 1 }; + return foo_diffable(x) + })) + expectEqual(1, gradient(at: 1, in: foo_diffable)) +} + +SIMDTests.test("SideEffects") { + func fourthPower(x: Float) -> Float { + var a = x + a = a * x + a = a * x + return a * x + } + expectEqual(4 * 27, gradient(at: 3, in: fourthPower)) +} + +SIMDTests.test("TupleSideEffects") { + func foo(_ x: Float) -> Float { + var tuple = (x, x) + tuple.0 = tuple.0 * x + return x * tuple.0 + } + expectEqual(27, gradient(at: 3, in: foo)) + + func fifthPower(_ x: Float) -> Float { + var tuple = (x, x) + tuple.0 = tuple.0 * x + tuple.1 = tuple.0 * x + return tuple.0 * tuple.1 + } + expectEqual(405, gradient(at: 3, in: fifthPower)) + + func nested(_ x: Float) -> Float { + var tuple = ((x, x), x) + tuple.0.0 = tuple.0.0 * x + tuple.0.1 = tuple.0.0 * x + return tuple.0.0 * tuple.0.1 + } + expectEqual(405, gradient(at: 3, in: nested)) +} + +// Tests TF-321. 
+SIMDTests.test("TupleNonDifferentiableElements") { + func foo(_ x: Float) -> Float { + var tuple = (x, 1) + tuple.0 = x + tuple.1 = 1 + return tuple.0 + } + expectEqual(1, gradient(at: 1, in: foo)) + + func bar(_ x: Float) -> Float { + var tuple: (Int, Int, Float, Float) = (1, 1, x, x) + tuple.0 = 1 + tuple.1 = 1 + tuple.3 = x + return tuple.3 + } + expectEqual(1, gradient(at: 1, in: bar)) + + struct Wrapper { + @differentiable(where T : Differentiable) + func baz(_ x: T) -> T { + var tuple = (1, 1, x, 1) + tuple.0 = 1 + tuple.2 = x + tuple.3 = 1 + return tuple.2 + } + } + expectEqual(1, gradient(at: Float(1), in: { x -> Float in + let wrapper = Wrapper() + return wrapper.baz(x) + })) +} + +// Tests TF-21. +SIMDTests.test("StructMemberwiseInitializer") { + struct Foo : AdditiveArithmetic, Differentiable { + var stored: Float + var computed: Float { + return stored * stored + } + } + + let 𝛁foo = pullback(at: Float(4), in: { input -> Foo in + let foo = Foo(stored: input) + return foo + foo + })(Foo.CotangentVector(stored: 1)) + expectEqual(2, 𝛁foo) + + let 𝛁computed = gradient(at: Float(4)) { input -> Float in + let foo = Foo(stored: input) + return foo.computed + } + expectEqual(8, 𝛁computed) + + let 𝛁product = gradient(at: Float(4)) { input -> Float in + let foo = Foo(stored: input) + return foo.computed * foo.stored + } + expectEqual(16, 𝛁product) + + struct Custom : AdditiveArithmetic, Differentiable { + var x: Float + + // Custom initializer with `@differentiable`. + @differentiable + init(x: Float) { + print(x) + self.x = x + } + } + + let 𝛁custom = pullback(at: Float(4), in: { input -> Custom in + let foo = Custom(x: input) + return foo + foo + })(Custom.CotangentVector(x: 1)) + expectEqual(2, 𝛁custom) +} + +// Tests TF-319: struct with non-differentiable constant stored property. 
+SIMDTests.test("StructConstantStoredProperty") { + struct TF_319 : Differentiable { + var x: Float + @noDerivative let constant = Float(2) + + @differentiable + init(x: Float) { + self.x = x + } + + @differentiable(wrt: (self, input)) + func applied(to input: Float) -> Float { + return x * constant * input + } + } + func testStructInit(to input: Float) -> Float { + let model = TF_319(x: 10) + return model.applied(to: input) + } + expectEqual(TF_319.CotangentVector(x: 6), + gradient(at: TF_319(x: 10), in: { $0.applied(to: 3) })) + expectEqual(20, gradient(at: 3, in: testStructInit)) +} + +SIMDTests.test("StructSideEffects") { + struct Point : AdditiveArithmetic, Differentiable { + var x: Float + var y: Float + var z: Float + } + + func double(_ input: Float) -> Point { + let point = Point(x: input, y: input, z: input) + return point + point + } + expectEqual(6, pullback(at: 4, in: double)(Point(x: 1, y: 1, z: 1))) + + func fifthPower(_ input: Float) -> Float { + var point = Point(x: input, y: input, z: input) + point.x = point.x * input + point.y = point.x * input + return point.x * point.y + } + expectEqual(405, gradient(at: 3, in: fifthPower)) + + func mix(_ input: Float) -> Float { + var tuple = (point: Point(x: input, y: input, z: input), float: input) + tuple.point.x = tuple.point.x * tuple.float + tuple.point.y = tuple.point.x * input + return tuple.point.x * tuple.point.y + } + expectEqual(405, gradient(at: 3, in: mix)) + + // Test TF-282. 
+ struct Add : Differentiable { + var bias: Float + func applied(to input: Float) -> Float { + var tmp = input + tmp = tmp + bias + return tmp + } + } + let model = Add(bias: 1) + expectEqual(Add.CotangentVector(bias: 1), gradient(at: model) { m in m.applied(to: 1) }) +} + +SIMDTests.test("StructGeneric") { + struct Generic : AdditiveArithmetic, Differentiable { + var x: T + var y: T + var z: T + } + + let 𝛁generic = pullback(at: Float(3), in: { input -> Generic in + var generic = Generic(x: input, y: input, z: input) + return generic + })(Generic.CotangentVector(x: 1, y: 1, z: 1)) + expectEqual(3, 𝛁generic) + + func fifthPower(_ input: Float) -> Float { + var generic = Generic(x: input, y: input, z: input) + generic.x = generic.x * input + generic.y = generic.x * input + return generic.x * generic.y + } + // FIXME(TF-274): The true expected result is `405`, like other variants of `fifthPower` above. + expectEqual(405, gradient(at: 3, in: fifthPower)) +} + +SIMDTests.test("SubsetIndices") { + func grad(_ lossFunction: @differentiable (Float, Float) -> Float) -> Float { + return gradient(at: 1) { x in lossFunction(x * x, 10.0) } + } + expectEqual(2, grad { x, y in x + y }) + + func gradWRTNonDiff(_ lossFunction: @differentiable (Float, @nondiff Int) -> Float) -> Float { + return gradient(at: 2) { x in lossFunction(x * x, 10) } + } + expectEqual(4, gradWRTNonDiff { x, y in x + Float(y) }) +} +*/ +runAllTests() From 9cf828e701f2602281453e40935e554894333c56 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Tue, 28 May 2019 17:51:19 -0700 Subject: [PATCH 18/26] WIP: add back functions --- stdlib/public/core/SIMDVector.swift | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index 14bc021d93665..6a02942c3ef05 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -779,7 +779,7 @@ extension SIMD where Scalar: FixedWidthInteger { 
// Implementations of floating-point operations. These should eventually all // be replaced with @_semantics to lower directly to vector IR nodes. -extension SIMD where Scalar: FloatingPoint { +extension SIMD where Scalar : FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) @@ -1282,6 +1282,15 @@ extension SIMD where Scalar: FloatingPoint { return lhs / Self(repeating: rhs) } +// @_transparent +// public static func +=(lhs: inout Self, rhs: Self) { +// lhs = lhs + rhs +// } +// +// @_transparent +// public static func -=(lhs: inout Self, rhs: Self) { +// lhs = lhs - rhs +// } @_transparent public static func *=(lhs: inout Self, rhs: Self) { lhs = lhs * rhs @@ -1292,6 +1301,10 @@ extension SIMD where Scalar: FloatingPoint { lhs = lhs / rhs } +// @_transparent +// public static func *=(lhs: inout Self, rhs: Scalar) { +// lhs = lhs * rhs +// } @_transparent public static func +=(lhs: inout Self, rhs: Scalar) { lhs = lhs + rhs From 85473be899a89a654b281cc4ce10fc3734dc8976 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Thu, 30 May 2019 11:01:29 -0700 Subject: [PATCH 19/26] Make negation diff & start fixing tests. 
--- stdlib/public/core/SIMDVector.swift | 15 +++++++- test/AutoDiff/SIMD.swift | 57 ++++++++++++++--------------- 2 files changed, 41 insertions(+), 31 deletions(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index 6a02942c3ef05..e6cf3b7e0cd71 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -1182,6 +1182,11 @@ extension SIMD where Scalar: FixedWidthInteger { extension SIMD where Scalar: FloatingPoint { @_transparent + @differentiable(vjp: _vjpNegate(rhs:) + where Self: Differentiable, + Self.TangentVector: SIMD, + Scalar : BinaryFloatingPoint, + Self.TangentVector.Scalar : BinaryFloatingPoint) public static prefix func -(rhs: Self) -> Self { return 0 - rhs } @@ -1506,10 +1511,18 @@ extension SIMD static func _vjpSubtract( lhs: Self, rhs: Self ) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { - return (lhs - rhs, { (v: TangentVector) in + return (lhs - rhs, { v in return (v, -v) }) } + + @inlinable + static func _vjpNegate(rhs: Self) + -> (Self, (TangentVector) -> (TangentVector)) { + return (-rhs, { v in + return -v + }) + } } extension SIMD diff --git a/test/AutoDiff/SIMD.swift b/test/AutoDiff/SIMD.swift index 006571fdca7b5..1ba856765cb4f 100644 --- a/test/AutoDiff/SIMD.swift +++ b/test/AutoDiff/SIMD.swift @@ -10,37 +10,34 @@ import Glibc var SIMDTests = TestSuite("SIMD") -typealias FloatArrayGrad = SIMD4.CotangentVector - SIMDTests.test("Addition") { let a = SIMD4(1,2,3,4) - // let aGrad = FloatArrayGrad(1,2,3,4) - // let foo = { (x: SIMD4) -> SIMD4 in - // return x - // } - // let backprop = pullback(at: a, in: foo) + let foo1 = { (x: SIMD4) -> SIMD4 in + return x + } + let bp1 = pullback(at: a, in: foo1) + expectEqual(a, bp1(a)) - let foo1 = { (x: SIMD4, y: SIMD4) -> SIMD4 in + let foo2 = { (x: SIMD4, y: SIMD4) -> SIMD4 in return x + y } - let backprop1 = pullback(at: a, a, in: foo1) - // expectEqual((aGrad, aGrad), backprop(FloatArrayGrad(1,2,3,4))) - - // 
let foo1 = { (x: SIMD4, y: SIMD4) -> SIMD4 in - // return x + y - // } - // expectEqual(SIMD4(arrayLiteral: 1, 2, 3, 4), gradient(at: a, a, in: foo1)) -// let foo2 = { (x: SIMD4, y: Float) -> Float in -// return -x * y -// } -// expectEqual((-4, -3), gradient(at: 3, 4, in: foo2)) -// let foo3 = { (x: Float, y: SIMD4) -> Float in -// return -x + y -// } -// expectEqual((-1, 1), gradient(at: 3, 4, in: foo3)) + let bp2 = pullback(at: a, a, in: foo2) + expectEqual((a, a), bp2(a)) + + let foo3 = { (x: SIMD4, y: Float) -> SIMD4 in + return -x * y + } + let bp3 = pullback(at: a, 5, in: foo3) + expectEqual((a, 1), bp3(a)) + + let foo4 = { (x: Float, y: SIMD4) -> SIMD4 in + return -x + y + } + let bp4 = pullback(at: 5, a, in: foo4) + expectEqual((1, a), bp4(a)) } -/* + SIMDTests.test("Fanout") { let foo1 = { (x: Float) -> Float in x - x @@ -190,7 +187,7 @@ SIMDTests.test("StructMemberwiseInitializer") { let 𝛁foo = pullback(at: Float(4), in: { input -> Foo in let foo = Foo(stored: input) return foo + foo - })(Foo.CotangentVector(stored: 1)) + })(Foo.TangentVector(stored: 1)) expectEqual(2, 𝛁foo) let 𝛁computed = gradient(at: Float(4)) { input -> Float in @@ -219,7 +216,7 @@ SIMDTests.test("StructMemberwiseInitializer") { let 𝛁custom = pullback(at: Float(4), in: { input -> Custom in let foo = Custom(x: input) return foo + foo - })(Custom.CotangentVector(x: 1)) + })(Custom.TangentVector(x: 1)) expectEqual(2, 𝛁custom) } @@ -243,7 +240,7 @@ SIMDTests.test("StructConstantStoredProperty") { let model = TF_319(x: 10) return model.applied(to: input) } - expectEqual(TF_319.CotangentVector(x: 6), + expectEqual(TF_319.TangentVector(x: 6), gradient(at: TF_319(x: 10), in: { $0.applied(to: 3) })) expectEqual(20, gradient(at: 3, in: testStructInit)) } @@ -287,7 +284,7 @@ SIMDTests.test("StructSideEffects") { } } let model = Add(bias: 1) - expectEqual(Add.CotangentVector(bias: 1), gradient(at: model) { m in m.applied(to: 1) }) + expectEqual(Add.TangentVector(bias: 1), gradient(at: model) { m 
in m.applied(to: 1) }) } SIMDTests.test("StructGeneric") { @@ -300,7 +297,7 @@ SIMDTests.test("StructGeneric") { let 𝛁generic = pullback(at: Float(3), in: { input -> Generic in var generic = Generic(x: input, y: input, z: input) return generic - })(Generic.CotangentVector(x: 1, y: 1, z: 1)) + })(Generic.TangentVector(x: 1, y: 1, z: 1)) expectEqual(3, 𝛁generic) func fifthPower(_ input: Float) -> Float { @@ -324,5 +321,5 @@ SIMDTests.test("SubsetIndices") { } expectEqual(4, gradWRTNonDiff { x, y in x + Float(y) }) } -*/ + runAllTests() From 99b8408cb5ca0569b961806d9af4c0173534e043 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Sun, 2 Jun 2019 14:53:24 -0700 Subject: [PATCH 20/26] Fix bugs in code and add tests TODO: - Determine whether sum() needs to be differentiated - See what other inits I can differentiate - Remove some of the custom VJPs now that init(repeating:) is differentiable --- stdlib/public/core/SIMDVector.swift | 60 ++- stdlib/public/core/SIMDVectorTypes.swift.gyb | 2 +- test/AutoDiff/SIMD.swift | 374 +++++-------------- 3 files changed, 133 insertions(+), 303 deletions(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index e6cf3b7e0cd71..5bfe8782ca854 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -90,6 +90,13 @@ extension SIMD { /// A vector with the specified value in all lanes. 
@_transparent + // SWIFT_ENABLE_TENSORFLOW + @differentiable(vjp: _vjpInit(repeating:) + where Self : Differentiable, + Self.TangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Self.TangentVector == Self, + Scalar.TangentVector == Scalar) public init(repeating value: Scalar) { self.init() for i in indices { self[i] = value } @@ -820,6 +827,12 @@ extension SIMD where Scalar : FloatingPoint { } @_transparent + // SWIFT_ENABLE_TENSORFLOW + @differentiable(vjp: _vjpDivide(lhs:rhs:) + where Self : Differentiable, + Self.TangentVector : SIMD, + Scalar : BinaryFloatingPoint, + Self.TangentVector == Self) public static func /(lhs: Self, rhs: Self) -> Self { var result = Self() for i in result.indices { result[i] = lhs[i] / rhs[i] } @@ -862,12 +875,12 @@ extension SIMD where Scalar : FloatingPoint { /// Returns the sum of the scalars in the vector. @_alwaysEmitIntoClient // SWIFT_ENABLE_TENSORFLOW - @differentiable(vjp: _vjpSum - where Self : Differentiable, - Self.TangentVector : SIMD, - Scalar : BinaryFloatingPoint & Differentiable, - Scalar.TangentVector : BinaryFloatingPoint, - Self.TangentVector == Self) +// @differentiable(vjp: _vjpSum +// where Self : Differentiable, +// Self.TangentVector : SIMD, +// Scalar : BinaryFloatingPoint & Differentiable, +// Scalar.TangentVector : BinaryFloatingPoint, +// Self.TangentVector == Self) public func sum() -> Scalar { // Implementation note: this eventually be defined to lower to either // llvm.experimental.vector.reduce.fadd or an explicit tree-sum. 
Open- @@ -1291,11 +1304,12 @@ extension SIMD where Scalar: FloatingPoint { // public static func +=(lhs: inout Self, rhs: Self) { // lhs = lhs + rhs // } -// + // @_transparent // public static func -=(lhs: inout Self, rhs: Self) { // lhs = lhs - rhs // } + @_transparent public static func *=(lhs: inout Self, rhs: Self) { lhs = lhs * rhs @@ -1310,6 +1324,7 @@ extension SIMD where Scalar: FloatingPoint { // public static func *=(lhs: inout Self, rhs: Scalar) { // lhs = lhs * rhs // } + @_transparent public static func +=(lhs: inout Self, rhs: Scalar) { lhs = lhs + rhs @@ -1568,7 +1583,7 @@ extension SIMD static func _vjpSubtract( lhs: Scalar, rhs: Self ) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { - return (lhs + rhs, { v in + return (lhs - rhs, { v in return (v.sum(), -v) }) } @@ -1586,7 +1601,7 @@ extension SIMD static func _vjpSubtract( lhs: Self, rhs: Scalar ) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { - return (lhs + rhs, { v in + return (lhs - rhs, { v in return (v, -v.sum()) }) } @@ -1612,7 +1627,7 @@ extension SIMD lhs: Self, rhs: Scalar ) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { return (lhs / rhs, { v in - (-lhs / (rhs * rhs) * v, (v / rhs).sum()) + (v / rhs, (-lhs / (rhs * rhs) * v).sum()) }) } @@ -1621,7 +1636,7 @@ extension SIMD lhs: Scalar, rhs: Self ) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { return (lhs * rhs, { v in - return ((v * lhs).sum(), v * rhs) + return ((v * rhs).sum(), v * lhs) }) } @@ -1635,14 +1650,27 @@ extension SIMD } } +//extension SIMD +// where Self : Differentiable, +// TangentVector : SIMD, +// Scalar : BinaryFloatingPoint & Differentiable, +// Scalar.TangentVector : BinaryFloatingPoint, +// TangentVector == Self { +// @usableFromInline +// func _vjpSum() -> (Scalar, (Scalar.TangentVector) -> TangentVector) { +// return (sum(), { v in Self(repeating: Scalar(v)) }) +// } +//} + extension SIMD where Self : Differentiable, - 
TangentVector : SIMD, + Self.TangentVector : SIMD, Scalar : BinaryFloatingPoint & Differentiable, - Scalar.TangentVector : BinaryFloatingPoint, - TangentVector == Self { + Self.TangentVector == Self, + Scalar.TangentVector == Scalar { @usableFromInline - func _vjpSum() -> (Scalar, (Scalar.TangentVector) -> TangentVector) { - return (sum(), { v in Self(repeating: Scalar(v)) }) + static func _vjpInit(repeating value: Scalar) -> + (Self, (TangentVector) -> Scalar.TangentVector) { + return (Self(repeating: value), { v in v.sum() }) } } diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index ca70c1421e67b..c04734687683a 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -192,7 +192,7 @@ extension SIMD${n} where Scalar: BinaryFloatingPoint { // SWIFT_ENABLE_TENSORFLOW extension SIMD${n} : AdditiveArithmetic where Scalar : FloatingPoint {} -extension SIMD${n} : VectorNumeric where Scalar : FloatingPoint {} +//extension SIMD${n} : VectorNumeric where Scalar : FloatingPoint {} extension SIMD${n} : Differentiable where Scalar : Differentiable & BinaryFloatingPoint, diff --git a/test/AutoDiff/SIMD.swift b/test/AutoDiff/SIMD.swift index 1ba856765cb4f..54ef6876c2ada 100644 --- a/test/AutoDiff/SIMD.swift +++ b/test/AutoDiff/SIMD.swift @@ -10,316 +10,118 @@ import Glibc var SIMDTests = TestSuite("SIMD") -SIMDTests.test("Addition") { - let a = SIMD4(1,2,3,4) - +SIMDTests.test("Identity") { + let a = SIMD4(1, 2, 3, 4) + let foo1 = { (x: SIMD4) -> SIMD4 in return x } let bp1 = pullback(at: a, in: foo1) expectEqual(a, bp1(a)) - - let foo2 = { (x: SIMD4, y: SIMD4) -> SIMD4 in - return x + y - } - let bp2 = pullback(at: a, a, in: foo2) - expectEqual((a, a), bp2(a)) - - let foo3 = { (x: SIMD4, y: Float) -> SIMD4 in - return -x * y - } - let bp3 = pullback(at: a, 5, in: foo3) - expectEqual((a, 1), bp3(a)) - - let foo4 = { (x: Float, y: SIMD4) -> SIMD4 in - return -x + y - } 
- let bp4 = pullback(at: 5, a, in: foo4) - expectEqual((1, a), bp4(a)) -} - -SIMDTests.test("Fanout") { - let foo1 = { (x: Float) -> Float in - x - x - } - expectEqual(0, gradient(at: 100, in: foo1)) - let foo2 = { (x: Float) -> Float in - x + x - } - expectEqual(2, gradient(at: 100, in: foo2)) - let foo3 = { (x: Float, y: Float) -> Float in - x + x + x * y - } - expectEqual((4, 3), gradient(at: 3, 2, in: foo3)) -} - -SIMDTests.test("FunctionCall") { - func foo(_ x: Float, _ y: Float) -> Float { - return 3 * x + { $0 * 3 }(3) * y - } - expectEqual((3, 9), gradient(at: 3, 4, in: foo)) - expectEqual(3, gradient(at: 3) { x in foo(x, 4) }) -} - -SIMDTests.test("ResultSelection") { - func foo(_ x: Float, _ y: Float) -> (Float, Float) { - return (x + 1, y + 2) - } - expectEqual((1, 0), gradient(at: 3, 3, in: { x, y in foo(x, y).0 })) - expectEqual((0, 1), gradient(at: 3, 3, in: { x, y in foo(x, y).1 })) -} - -SIMDTests.test("CaptureLocal") { - let z: Float = 10 - func foo(_ x: Float) -> Float { - return z * x - } - expectEqual(10, gradient(at: 0, in: foo)) -} - -var globalVar: Float = 10 -SIMDTests.test("CaptureGlobal") { - let foo: (Float) -> Float = { x in - globalVar += 20 - return globalVar * x - } - expectEqual(30, gradient(at: 0, in: foo)) -} - -let foo: (Float) -> Float = { x in - return x * x -} -SIMDTests.test("GlobalLet") { - expectEqual(2, gradient(at: 1, in: foo)) -} - -var foo_diffable: @differentiable (Float) -> (Float) - = differentiableFunction { x in (x * x, { v in 2 * x * v }) } -SIMDTests.test("GlobalDiffableFunc") { - expectEqual(2, gradient(at: 1, in: foo_diffable)) - expectEqual(2, gradient(at: 1, in: { x in foo_diffable(x) })) - expectEqual(1, gradient(at: 1, in: { (x: Float) -> Float in - foo_diffable = { x in x + 1 }; - return foo_diffable(x) - })) - expectEqual(1, gradient(at: 1, in: foo_diffable)) -} - -SIMDTests.test("SideEffects") { - func fourthPower(x: Float) -> Float { - var a = x - a = a * x - a = a * x - return a * x - } - expectEqual(4 
* 27, gradient(at: 3, in: fourthPower)) -} - -SIMDTests.test("TupleSideEffects") { - func foo(_ x: Float) -> Float { - var tuple = (x, x) - tuple.0 = tuple.0 * x - return x * tuple.0 - } - expectEqual(27, gradient(at: 3, in: foo)) - - func fifthPower(_ x: Float) -> Float { - var tuple = (x, x) - tuple.0 = tuple.0 * x - tuple.1 = tuple.0 * x - return tuple.0 * tuple.1 - } - expectEqual(405, gradient(at: 3, in: fifthPower)) - - func nested(_ x: Float) -> Float { - var tuple = ((x, x), x) - tuple.0.0 = tuple.0.0 * x - tuple.0.1 = tuple.0.0 * x - return tuple.0.0 * tuple.0.1 - } - expectEqual(405, gradient(at: 3, in: nested)) } -// Tests TF-321. -SIMDTests.test("TupleNonDifferentiableElements") { - func foo(_ x: Float) -> Float { - var tuple = (x, 1) - tuple.0 = x - tuple.1 = 1 - return tuple.0 - } - expectEqual(1, gradient(at: 1, in: foo)) - - func bar(_ x: Float) -> Float { - var tuple: (Int, Int, Float, Float) = (1, 1, x, x) - tuple.0 = 1 - tuple.1 = 1 - tuple.3 = x - return tuple.3 - } - expectEqual(1, gradient(at: 1, in: bar)) - - struct Wrapper { - @differentiable(where T : Differentiable) - func baz(_ x: T) -> T { - var tuple = (1, 1, x, 1) - tuple.0 = 1 - tuple.2 = x - tuple.3 = 1 - return tuple.2 - } +SIMDTests.test("Negate") { + let a = SIMD4(1, 2, 3, 4) + + let foo1 = { (x: SIMD4) -> SIMD4 in + return -x } - expectEqual(1, gradient(at: Float(1), in: { x -> Float in - let wrapper = Wrapper() - return wrapper.baz(x) - })) + let bp1 = pullback(at: a, in: foo1) + expectEqual(-a, bp1(a)) } -// Tests TF-21. 
-SIMDTests.test("StructMemberwiseInitializer") { - struct Foo : AdditiveArithmetic, Differentiable { - var stored: Float - var computed: Float { - return stored * stored - } - } - - let 𝛁foo = pullback(at: Float(4), in: { input -> Foo in - let foo = Foo(stored: input) - return foo + foo - })(Foo.TangentVector(stored: 1)) - expectEqual(2, 𝛁foo) - - let 𝛁computed = gradient(at: Float(4)) { input -> Float in - let foo = Foo(stored: input) - return foo.computed - } - expectEqual(8, 𝛁computed) - - let 𝛁product = gradient(at: Float(4)) { input -> Float in - let foo = Foo(stored: input) - return foo.computed * foo.stored - } - expectEqual(16, 𝛁product) - - struct Custom : AdditiveArithmetic, Differentiable { - var x: Float +//SIMDTests.test("Sum") { +// let a = SIMD4(1, 2, 3, 4) +// +// let foo1 = { (x: SIMD4) -> Float in +// return x.sum() +// } +// let bp1 = pullback(at: a, in: foo1) +// expectEqual(SIMD4(3, 3, 3, 3), bp1(3)) +//} - // Custom initializer with `@differentiable`. - @differentiable - init(x: Float) { - print(x) - self.x = x - } +SIMDTests.test("Addition") { + let a = SIMD4(1, 2, 3, 4) + let g = SIMD4(1, 1, 1, 1) + + // SIMD + SIMD + let foo1 = { (x: SIMD4, y: SIMD4) -> SIMD4 in + return x + y } - - let 𝛁custom = pullback(at: Float(4), in: { input -> Custom in - let foo = Custom(x: input) - return foo + foo - })(Custom.TangentVector(x: 1)) - expectEqual(2, 𝛁custom) -} - -// Tests TF-319: struct with non-differentiable constant stored property. 
-SIMDTests.test("StructConstantStoredProperty") { - struct TF_319 : Differentiable { - var x: Float - @noDerivative let constant = Float(2) - - @differentiable - init(x: Float) { - self.x = x - } - - @differentiable(wrt: (self, input)) - func applied(to input: Float) -> Float { - return x * constant * input - } + let bp1 = pullback(at: a, a, in: foo1) + expectEqual((g, g), bp1(g)) + + // SIMD + Scalar + let foo2 = { (x: SIMD4, y: Float) -> SIMD4 in + return x + y } - func testStructInit(to input: Float) -> Float { - let model = TF_319(x: 10) - return model.applied(to: input) + let bp2 = pullback(at: a, 5, in: foo2) + expectEqual((g, 4), bp2(g)) + + // Scalar + SIMD + let foo3 = { (x: SIMD4, y: Float) -> SIMD4 in + return y + x } - expectEqual(TF_319.TangentVector(x: 6), - gradient(at: TF_319(x: 10), in: { $0.applied(to: 3) })) - expectEqual(20, gradient(at: 3, in: testStructInit)) + let bp3 = pullback(at: a, 5, in: foo3) + expectEqual((g, 4), bp3(g)) } -SIMDTests.test("StructSideEffects") { - struct Point : AdditiveArithmetic, Differentiable { - var x: Float - var y: Float - var z: Float - } - - func double(_ input: Float) -> Point { - let point = Point(x: input, y: input, z: input) - return point + point - } - expectEqual(6, pullback(at: 4, in: double)(Point(x: 1, y: 1, z: 1))) - - func fifthPower(_ input: Float) -> Float { - var point = Point(x: input, y: input, z: input) - point.x = point.x * input - point.y = point.x * input - return point.x * point.y - } - expectEqual(405, gradient(at: 3, in: fifthPower)) - - func mix(_ input: Float) -> Float { - var tuple = (point: Point(x: input, y: input, z: input), float: input) - tuple.point.x = tuple.point.x * tuple.float - tuple.point.y = tuple.point.x * input - return tuple.point.x * tuple.point.y - } - expectEqual(405, gradient(at: 3, in: mix)) +SIMDTests.test("Multiplication") { + let a = SIMD4(1, 2, 3, 4) + let g = SIMD4(1, 1, 1, 1) - // Test TF-282. 
- struct Add : Differentiable { - var bias: Float - func applied(to input: Float) -> Float { - var tmp = input - tmp = tmp + bias - return tmp - } + // SIMD * SIMD + let foo1 = { (x: SIMD4, y: SIMD4) -> SIMD4 in + return x * y } - let model = Add(bias: 1) - expectEqual(Add.TangentVector(bias: 1), gradient(at: model) { m in m.applied(to: 1) }) -} + let bp1 = pullback(at: a, a, in: foo1) + expectEqual((a, a), bp1(g)) -SIMDTests.test("StructGeneric") { - struct Generic : AdditiveArithmetic, Differentiable { - var x: T - var y: T - var z: T + // SIMD * Scalar + let foo2 = { (x: SIMD4, y: Float) -> SIMD4 in + return x * y } + let bp2 = pullback(at: a, 5, in: foo2) + expectEqual((SIMD4(5, 5, 5, 5), 10), bp2(g)) - let 𝛁generic = pullback(at: Float(3), in: { input -> Generic in - var generic = Generic(x: input, y: input, z: input) - return generic - })(Generic.TangentVector(x: 1, y: 1, z: 1)) - expectEqual(3, 𝛁generic) - - func fifthPower(_ input: Float) -> Float { - var generic = Generic(x: input, y: input, z: input) - generic.x = generic.x * input - generic.y = generic.x * input - return generic.x * generic.y + // Scalar * SIMD + let foo3 = { (x: SIMD4, y: Float) -> SIMD4 in + return y * x } - // FIXME(TF-274): The true expected result is `405`, like other variants of `fifthPower` above. 
- expectEqual(405, gradient(at: 3, in: fifthPower)) + let bp3 = pullback(at: a, 5, in: foo3) + expectEqual((SIMD4(5, 5, 5, 5), 10), bp3(g)) } -SIMDTests.test("SubsetIndices") { - func grad(_ lossFunction: @differentiable (Float, Float) -> Float) -> Float { - return gradient(at: 1) { x in lossFunction(x * x, 10.0) } - } - expectEqual(2, grad { x, y in x + y }) - - func gradWRTNonDiff(_ lossFunction: @differentiable (Float, @nondiff Int) -> Float) -> Float { - return gradient(at: 2) { x in lossFunction(x * x, 10) } - } - expectEqual(4, gradWRTNonDiff { x, y in x + Float(y) }) +SIMDTests.test("Division") { + let a = SIMD4(1, 2, 3, 4) + let g = SIMD4(1, 1, 1, 1) + + // SIMD / SIMD + let foo1 = { (x: SIMD4, y: SIMD4) -> SIMD4 in + return x / y + } + let bp1 = pullback(at: a, a, in: foo1) + let dlhs1 = g / a + let drhs1 = -1 / a + expectEqual((dlhs1, drhs1), bp1(g)) + + // SIMD / Scalar + let foo2 = { (x: SIMD4, y: Float) -> SIMD4 in + return x / y + } + let bp2 = pullback(at: a, 5, in: foo2) + let dlhs2 = g / 5 + let drhs2 = (-a / 25 * g).sum() + expectEqual((dlhs2, drhs2), bp2(g)) + + // Scalar / SIMD + let foo3 = { (x: Float, y: SIMD4) -> SIMD4 in + return x / y + } + let dlhs3 = (g / a).sum() + let drhs3 = -5 / (a*a) * g + let bp3 = pullback(at: 5, a, in: foo3) + expectEqual((dlhs3, drhs3), bp3(g)) } runAllTests() From ffdb4c49575dfb21e41644f3cc162d5c22eec0a1 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Sun, 2 Jun 2019 18:10:21 -0700 Subject: [PATCH 21/26] Get sum() differentiable. 
--- lib/Sema/TypeCheckAttr.cpp | 3 ++- stdlib/public/core/SIMDVector.swift | 38 +++++++++++++++-------------- test/AutoDiff/SIMD.swift | 18 +++++++------- 3 files changed, 31 insertions(+), 28 deletions(-) diff --git a/lib/Sema/TypeCheckAttr.cpp b/lib/Sema/TypeCheckAttr.cpp index 60e522d32df97..60b58f29a080a 100644 --- a/lib/Sema/TypeCheckAttr.cpp +++ b/lib/Sema/TypeCheckAttr.cpp @@ -2628,7 +2628,8 @@ static FuncDecl *resolveAutoDiffAssociatedFunction( auto isABIPublic = [&](AbstractFunctionDecl *func) { return func->getFormalAccess() >= AccessLevel::Public || func->getAttrs().hasAttribute() || - func->getAttrs().hasAttribute(); + func->getAttrs().hasAttribute() || + func->getAttrs().hasAttribute(); }; // If the original function is exported (i.e. it is public or diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index 5bfe8782ca854..eee73c5bb16be 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -873,14 +873,16 @@ extension SIMD where Scalar : FloatingPoint { } /// Returns the sum of the scalars in the vector. - @_alwaysEmitIntoClient // SWIFT_ENABLE_TENSORFLOW -// @differentiable(vjp: _vjpSum -// where Self : Differentiable, -// Self.TangentVector : SIMD, -// Scalar : BinaryFloatingPoint & Differentiable, -// Scalar.TangentVector : BinaryFloatingPoint, -// Self.TangentVector == Self) + @inlinable + // FIXME: TF-545 we want the sum() func to be marked as + // `@_alwaysEmitIntoClient` like before when we define the VJP + @differentiable(vjp: _vjpSum + where Self : Differentiable, + Self.TangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Scalar.TangentVector : BinaryFloatingPoint, + Self.TangentVector == Self) public func sum() -> Scalar { // Implementation note: this eventually be defined to lower to either // llvm.experimental.vector.reduce.fadd or an explicit tree-sum. 
Open- @@ -1650,17 +1652,17 @@ extension SIMD } } -//extension SIMD -// where Self : Differentiable, -// TangentVector : SIMD, -// Scalar : BinaryFloatingPoint & Differentiable, -// Scalar.TangentVector : BinaryFloatingPoint, -// TangentVector == Self { -// @usableFromInline -// func _vjpSum() -> (Scalar, (Scalar.TangentVector) -> TangentVector) { -// return (sum(), { v in Self(repeating: Scalar(v)) }) -// } -//} +extension SIMD + where Self : Differentiable, + TangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Scalar.TangentVector : BinaryFloatingPoint, + TangentVector == Self { + @inlinable + func _vjpSum() -> (Scalar, (Scalar.TangentVector) -> TangentVector) { + return (sum(), { v in Self(repeating: Scalar(v)) }) + } +} extension SIMD where Self : Differentiable, diff --git a/test/AutoDiff/SIMD.swift b/test/AutoDiff/SIMD.swift index 54ef6876c2ada..1fbf9a3589b42 100644 --- a/test/AutoDiff/SIMD.swift +++ b/test/AutoDiff/SIMD.swift @@ -30,15 +30,15 @@ SIMDTests.test("Negate") { expectEqual(-a, bp1(a)) } -//SIMDTests.test("Sum") { -// let a = SIMD4(1, 2, 3, 4) -// -// let foo1 = { (x: SIMD4) -> Float in -// return x.sum() -// } -// let bp1 = pullback(at: a, in: foo1) -// expectEqual(SIMD4(3, 3, 3, 3), bp1(3)) -//} +SIMDTests.test("Sum") { + let a = SIMD4(1, 2, 3, 4) + + let foo1 = { (x: SIMD4) -> Float in + return x.sum() + } + let bp1 = pullback(at: a, in: foo1) + expectEqual(SIMD4(3, 3, 3, 3), bp1(3)) +} SIMDTests.test("Addition") { let a = SIMD4(1, 2, 3, 4) From c8df93f565a11d5c177221852c2bb25de9ed3254 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Sun, 2 Jun 2019 18:55:24 -0700 Subject: [PATCH 22/26] Cleanup and additional init(repeating:) test. 
--- lib/Sema/TypeCheckAttr.cpp | 3 +-- stdlib/public/core/SIMDVector.swift | 37 +++++++++++++++-------------- test/AutoDiff/SIMD.swift | 10 ++++++++ 3 files changed, 30 insertions(+), 20 deletions(-) diff --git a/lib/Sema/TypeCheckAttr.cpp b/lib/Sema/TypeCheckAttr.cpp index 60b58f29a080a..60e522d32df97 100644 --- a/lib/Sema/TypeCheckAttr.cpp +++ b/lib/Sema/TypeCheckAttr.cpp @@ -2628,8 +2628,7 @@ static FuncDecl *resolveAutoDiffAssociatedFunction( auto isABIPublic = [&](AbstractFunctionDecl *func) { return func->getFormalAccess() >= AccessLevel::Public || func->getAttrs().hasAttribute() || - func->getAttrs().hasAttribute() || - func->getAttrs().hasAttribute(); + func->getAttrs().hasAttribute(); }; // If the original function is exported (i.e. it is public or diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index eee73c5bb16be..dc913a07ce4b9 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -803,7 +803,7 @@ extension SIMD where Scalar : FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpSubtract(lhs:rhs:) - where Self: Differentiable, + where Self : Differentiable, Self.TangentVector: SIMD, Scalar : BinaryFloatingPoint, Self.TangentVector.Scalar : BinaryFloatingPoint) @@ -1197,9 +1197,10 @@ extension SIMD where Scalar: FixedWidthInteger { extension SIMD where Scalar: FloatingPoint { @_transparent + // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpNegate(rhs:) - where Self: Differentiable, - Self.TangentVector: SIMD, + where Self : Differentiable, + Self.TangentVector : SIMD, Scalar : BinaryFloatingPoint, Self.TangentVector.Scalar : BinaryFloatingPoint) public static prefix func -(rhs: Self) -> Self { @@ -1209,10 +1210,10 @@ extension SIMD where Scalar: FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Self: Differentiable, + where Self : Differentiable, Self.TangentVector: SIMD, Scalar : 
Differentiable & BinaryFloatingPoint, - Scalar.TangentVector: BinaryFloatingPoint, + Scalar.TangentVector : BinaryFloatingPoint, Self.TangentVector.Scalar == Scalar.TangentVector) public static func +(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) + rhs @@ -1221,10 +1222,10 @@ extension SIMD where Scalar: FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Self: Differentiable, - Self.TangentVector: SIMD, + where Self : Differentiable, + Self.TangentVector : SIMD, Scalar : Differentiable & BinaryFloatingPoint, - Scalar.TangentVector: BinaryFloatingPoint, + Scalar.TangentVector : BinaryFloatingPoint, Self.TangentVector.Scalar == Scalar.TangentVector) public static func -(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) - rhs @@ -1257,10 +1258,10 @@ extension SIMD where Scalar: FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Self: Differentiable, - Self.TangentVector: SIMD, + where Self : Differentiable, + Self.TangentVector : SIMD, Scalar : Differentiable & BinaryFloatingPoint, - Scalar.TangentVector: BinaryFloatingPoint, + Scalar.TangentVector : BinaryFloatingPoint, Self.TangentVector.Scalar == Scalar.TangentVector) public static func +(lhs: Self, rhs: Scalar) -> Self { return lhs + Self(repeating: rhs) @@ -1269,10 +1270,10 @@ extension SIMD where Scalar: FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Self: Differentiable, - Self.TangentVector: SIMD, + where Self : Differentiable, + Self.TangentVector : SIMD, Scalar : Differentiable & BinaryFloatingPoint, - Scalar.TangentVector: BinaryFloatingPoint, + Scalar.TangentVector : BinaryFloatingPoint, Self.TangentVector.Scalar == Scalar.TangentVector) public static func -(lhs: Self, rhs: Scalar) -> Self { return lhs - Self(repeating: rhs) @@ -1511,10 +1512,10 @@ where T: SIMD, T.Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW 
extension SIMD - where Self: Differentiable, - TangentVector: SIMD, + where Self : Differentiable, + TangentVector : SIMD, Scalar : BinaryFloatingPoint, - TangentVector.Scalar: BinaryFloatingPoint { + TangentVector.Scalar : BinaryFloatingPoint { @inlinable static func _vjpAdd( lhs: Self, rhs: Self @@ -1543,7 +1544,7 @@ extension SIMD } extension SIMD - where Self: Differentiable, + where Self : Differentiable, TangentVector: SIMD, Scalar : BinaryFloatingPoint, Self.TangentVector == Self { diff --git a/test/AutoDiff/SIMD.swift b/test/AutoDiff/SIMD.swift index 1fbf9a3589b42..634f594223d55 100644 --- a/test/AutoDiff/SIMD.swift +++ b/test/AutoDiff/SIMD.swift @@ -30,6 +30,16 @@ SIMDTests.test("Negate") { expectEqual(-a, bp1(a)) } +SIMDTests.test("init(repeating:)") { + let g = SIMD4(1, 1, 1, 1) + + let foo1 = { (x: Float) -> SIMD4 in + return SIMD4(repeating: x) + } + let bp1 = pullback(at: 5, in: foo1) + expectEqual(4, bp1(g)) +} + SIMDTests.test("Sum") { let a = SIMD4(1, 2, 3, 4) From 2821b8fa2d49d4e08f045f6b848842da76712569 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Mon, 3 Jun 2019 13:00:11 -0700 Subject: [PATCH 23/26] Fix up AdditiveArithmetic conformance. 
--- stdlib/public/core/SIMDVector.swift | 24 ++++++-------------- stdlib/public/core/SIMDVectorTypes.swift.gyb | 2 -- 2 files changed, 7 insertions(+), 19 deletions(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index dc913a07ce4b9..fc7443c379af6 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -1221,7 +1221,7 @@ extension SIMD where Scalar: FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW - @differentiable(vjp: _vjpAdd(lhs:rhs:) + @differentiable(vjp: _vjpSubtract(lhs:rhs:) where Self : Differentiable, Self.TangentVector : SIMD, Scalar : Differentiable & BinaryFloatingPoint, @@ -1269,7 +1269,7 @@ extension SIMD where Scalar: FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW - @differentiable(vjp: _vjpAdd(lhs:rhs:) + @differentiable(vjp: _vjpSubtract(lhs:rhs:) where Self : Differentiable, Self.TangentVector : SIMD, Scalar : Differentiable & BinaryFloatingPoint, @@ -1303,16 +1303,6 @@ extension SIMD where Scalar: FloatingPoint { return lhs / Self(repeating: rhs) } -// @_transparent -// public static func +=(lhs: inout Self, rhs: Self) { -// lhs = lhs + rhs -// } - -// @_transparent -// public static func -=(lhs: inout Self, rhs: Self) { -// lhs = lhs - rhs -// } - @_transparent public static func *=(lhs: inout Self, rhs: Self) { lhs = lhs * rhs @@ -1323,11 +1313,6 @@ extension SIMD where Scalar: FloatingPoint { lhs = lhs / rhs } -// @_transparent -// public static func *=(lhs: inout Self, rhs: Scalar) { -// lhs = lhs * rhs -// } - @_transparent public static func +=(lhs: inout Self, rhs: Scalar) { lhs = lhs + rhs @@ -1337,6 +1322,11 @@ extension SIMD where Scalar: FloatingPoint { public static func -=(lhs: inout Self, rhs: Scalar) { lhs = lhs - rhs } + + @_transparent + public static func *=(lhs: inout Self, rhs: Scalar) { + lhs = lhs * rhs + } @_transparent public static func /=(lhs: inout Self, rhs: Scalar) { diff --git 
a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index c04734687683a..71c8499723a8e 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -192,8 +192,6 @@ extension SIMD${n} where Scalar: BinaryFloatingPoint { // SWIFT_ENABLE_TENSORFLOW extension SIMD${n} : AdditiveArithmetic where Scalar : FloatingPoint {} -extension SIMD${n} : VectorNumeric where Scalar : FloatingPoint {} - extension SIMD${n} : Differentiable where Scalar : Differentiable & BinaryFloatingPoint, Scalar.TangentVector: BinaryFloatingPoint { From 7abb1c334a1bd0669c5b107c324c83afacea0d9e Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Wed, 12 Jun 2019 19:39:01 -0700 Subject: [PATCH 24/26] Code cleanup and additional tests. --- stdlib/public/core/SIMDVector.swift | 88 +++++++-------- stdlib/public/core/SIMDVectorTypes.swift.gyb | 2 +- test/AutoDiff/SIMD.swift | 111 ++++++++++++++----- 3 files changed, 123 insertions(+), 78 deletions(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index 54185664db673..d6933e40868bb 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -804,7 +804,7 @@ extension SIMD where Scalar : FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpSubtract(lhs:rhs:) where Self : Differentiable, - Self.TangentVector: SIMD, + Self.TangentVector : SIMD, Scalar : BinaryFloatingPoint, Self.TangentVector.Scalar : BinaryFloatingPoint) public static func -(lhs: Self, rhs: Self) -> Self { @@ -874,9 +874,9 @@ extension SIMD where Scalar : FloatingPoint { /// Returns the sum of the scalars in the vector.
// SWIFT_ENABLE_TENSORFLOW - @inlinable // FIXME: TF-545 we want the sum() func to be marked as // `@_alwaysEmitIntoClient` like before when we define the VJP + @inlinable @differentiable(vjp: _vjpSum where Self : Differentiable, Self.TangentVector : SIMD, @@ -1211,7 +1211,7 @@ extension SIMD where Scalar: FloatingPoint { // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) where Self : Differentiable, - Self.TangentVector: SIMD, + Self.TangentVector : SIMD, Scalar : Differentiable & BinaryFloatingPoint, Scalar.TangentVector : BinaryFloatingPoint, Self.TangentVector.Scalar == Scalar.TangentVector) @@ -1246,11 +1246,11 @@ extension SIMD where Scalar: FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpDivide(lhs:rhs:) - where Self : Differentiable, - Self.TangentVector : SIMD, - Scalar : BinaryFloatingPoint & Differentiable, - Self.TangentVector == Self, - Scalar.TangentVector == Scalar) + where Self : Differentiable, + Self.TangentVector : SIMD, + Scalar : BinaryFloatingPoint & Differentiable, + Self.TangentVector == Self, + Scalar.TangentVector == Scalar) public static func /(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) / rhs } @@ -1290,7 +1290,7 @@ extension SIMD where Scalar: FloatingPoint { public static func *(lhs: Self, rhs: Scalar) -> Self { return lhs * Self(repeating: rhs) } - + @_transparent // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpDivide(lhs:rhs:) @@ -1507,18 +1507,16 @@ extension SIMD Scalar : BinaryFloatingPoint, TangentVector.Scalar : BinaryFloatingPoint { @inlinable - static func _vjpAdd( - lhs: Self, rhs: Self - ) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { + static func _vjpAdd(lhs: Self, rhs: Self) + -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { return (lhs + rhs, { v in return (v, v) }) } @inlinable - static func _vjpSubtract( - lhs: Self, rhs: Self - ) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { + static func 
_vjpSubtract(lhs: Self, rhs: Self) + -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { return (lhs - rhs, { v in return (v, -v) }) @@ -1526,7 +1524,7 @@ extension SIMD @inlinable static func _vjpNegate(rhs: Self) - -> (Self, (TangentVector) -> (TangentVector)) { + -> (Self, (TangentVector) -> (TangentVector)) { return (-rhs, { v in return -v }) @@ -1535,22 +1533,20 @@ extension SIMD extension SIMD where Self : Differentiable, - TangentVector: SIMD, + TangentVector : SIMD, Scalar : BinaryFloatingPoint, Self.TangentVector == Self { @inlinable - static func _vjpMultiply( - lhs: Self, rhs: Self - ) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { + static func _vjpMultiply(lhs: Self, rhs: Self) + -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { return (lhs * rhs, { v in return (v * rhs, v * lhs) }) } @inlinable - static func _vjpDivide( - lhs: Self, rhs: Self - ) -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { + static func _vjpDivide(lhs: Self, rhs: Self) + -> (Self, (TangentVector) -> (TangentVector, TangentVector)) { return (lhs / rhs, { v in (v / rhs, -lhs / (rhs * rhs) * v) }) @@ -1561,39 +1557,35 @@ extension SIMD where Self : Differentiable, TangentVector : SIMD, Scalar : BinaryFloatingPoint & Differentiable, - Scalar.TangentVector: BinaryFloatingPoint, + Scalar.TangentVector : BinaryFloatingPoint, TangentVector.Scalar == Scalar.TangentVector { @inlinable - static func _vjpAdd( - lhs: Scalar, rhs: Self - ) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { + static func _vjpAdd(lhs: Scalar, rhs: Self) + -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { return (lhs + rhs, { v in return (v.sum(), v) }) } @inlinable - static func _vjpSubtract( - lhs: Scalar, rhs: Self - ) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { + static func _vjpSubtract(lhs: Scalar, rhs: Self) + -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { return 
(lhs - rhs, { v in return (v.sum(), -v) }) } @inlinable - static func _vjpAdd( - lhs: Self, rhs: Scalar - ) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { + static func _vjpAdd(lhs: Self, rhs: Scalar) + -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { return (lhs + rhs, { v in return (v, v.sum()) }) } @inlinable - static func _vjpSubtract( - lhs: Self, rhs: Scalar - ) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { + static func _vjpSubtract(lhs: Self, rhs: Scalar) + -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { return (lhs - rhs, { v in return (v, -v.sum()) }) @@ -1607,36 +1599,32 @@ extension SIMD Self.TangentVector == Self, Scalar.TangentVector == Scalar { @inlinable - static func _vjpMultiply( - lhs: Self, rhs: Scalar - ) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { + static func _vjpMultiply(lhs: Self, rhs: Scalar) + -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { return (lhs * rhs, { v in return (v * rhs, (v * lhs).sum()) }) } @inlinable - static func _vjpDivide( - lhs: Self, rhs: Scalar - ) -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { + static func _vjpDivide(lhs: Self, rhs: Scalar) + -> (Self, (TangentVector) -> (TangentVector, Scalar.TangentVector)) { return (lhs / rhs, { v in (v / rhs, (-lhs / (rhs * rhs) * v).sum()) }) } @inlinable - static func _vjpMultiply( - lhs: Scalar, rhs: Self - ) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { + static func _vjpMultiply(lhs: Scalar, rhs: Self) + -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { return (lhs * rhs, { v in return ((v * rhs).sum(), v * lhs) }) } @inlinable - static func _vjpDivide( - lhs: Scalar, rhs: Self - ) -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { + static func _vjpDivide(lhs: Scalar, rhs: Self) + -> (Self, (TangentVector) -> (Scalar.TangentVector, TangentVector)) { 
return (lhs / rhs, { v in ((v / rhs).sum(), -lhs / (rhs * rhs) * v) }) @@ -1662,8 +1650,8 @@ extension SIMD Self.TangentVector == Self, Scalar.TangentVector == Scalar { @usableFromInline - static func _vjpInit(repeating value: Scalar) -> - (Self, (TangentVector) -> Scalar.TangentVector) { + static func _vjpInit(repeating value: Scalar) + -> (Self, (TangentVector) -> Scalar.TangentVector) { return (Self(repeating: value), { v in v.sum() }) } } diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb index 0fb982a1df27c..8f7f5fb71108f 100644 --- a/stdlib/public/core/SIMDVectorTypes.swift.gyb +++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb @@ -194,7 +194,7 @@ extension SIMD${n} : AdditiveArithmetic where Scalar : FloatingPoint {} extension SIMD${n} : Differentiable where Scalar : Differentiable & BinaryFloatingPoint, - Scalar.TangentVector: BinaryFloatingPoint { + Scalar.TangentVector : BinaryFloatingPoint { public typealias TangentVector = SIMD${n} public typealias AllDifferentiableVariables = SIMD${n} public func tangentVector(from cotangent: TangentVector) -> TangentVector { diff --git a/test/AutoDiff/SIMD.swift b/test/AutoDiff/SIMD.swift index 634f594223d55..473db4b090d4d 100644 --- a/test/AutoDiff/SIMD.swift +++ b/test/AutoDiff/SIMD.swift @@ -10,44 +10,63 @@ import Glibc var SIMDTests = TestSuite("SIMD") +SIMDTests.test("init(repeating:)") { + let g = SIMD4(1, 1, 1, 1) + + let foo1 = { (x: Float) -> SIMD4 in + return SIMD4(repeating: 2 * x) + } + let (val, bp1) = valueWithPullback(at: 5, in: foo1) + expectEqual(SIMD4(10, 10, 10, 10), val) + expectEqual(8, bp1(g)) +} + +SIMDTests.test("Sum") { + let a = SIMD4(1, 2, 3, 4) + + let foo1 = { (x: SIMD4) -> Float in + return x.sum() + } + let (val, bp1) = valueWithPullback(at: a, in: foo1) + expectEqual(10, val) + expectEqual(SIMD4(3, 3, 3, 3), bp1(3)) +} + SIMDTests.test("Identity") { let a = SIMD4(1, 2, 3, 4) + let g = SIMD4(1, 1, 1, 1) let foo1 = { (x: SIMD4) -> 
SIMD4 in return x } - let bp1 = pullback(at: a, in: foo1) - expectEqual(a, bp1(a)) + let (val, bp1) = valueWithPullback(at: a, in: foo1) + expectEqual(a, val) + expectEqual(g, bp1(g)) } SIMDTests.test("Negate") { let a = SIMD4(1, 2, 3, 4) + let g = SIMD4(1, 1, 1, 1) let foo1 = { (x: SIMD4) -> SIMD4 in return -x } - let bp1 = pullback(at: a, in: foo1) - expectEqual(-a, bp1(a)) + let (val, bp1) = valueWithPullback(at: a, in: foo1) + expectEqual(-a, val) + expectEqual(-g, bp1(g)) } -SIMDTests.test("init(repeating:)") { +SIMDTests.test("subscript") { + let a = SIMD4(1, 2, 3, 4) let g = SIMD4(1, 1, 1, 1) - let foo1 = { (x: Float) -> SIMD4 in - return SIMD4(repeating: x) - } - let bp1 = pullback(at: 5, in: foo1) - expectEqual(4, bp1(g)) -} - -SIMDTests.test("Sum") { - let a = SIMD4(1, 2, 3, 4) - let foo1 = { (x: SIMD4) -> Float in - return x.sum() + return x[3] } - let bp1 = pullback(at: a, in: foo1) - expectEqual(SIMD4(3, 3, 3, 3), bp1(3)) + + let (val, bp1) = valueWithPullback(at: a, in: foo1) + expectEqual(4, val) + expectEqual(SIMD4(0, 0, 0, 7), bp1(7)) } SIMDTests.test("Addition") { @@ -58,24 +77,56 @@ SIMDTests.test("Addition") { let foo1 = { (x: SIMD4, y: SIMD4) -> SIMD4 in return x + y } - let bp1 = pullback(at: a, a, in: foo1) + let (val1, bp1) = valueWithPullback(at: a, a, in: foo1) + expectEqual(SIMD4(2, 4, 6, 8), val1) expectEqual((g, g), bp1(g)) // SIMD + Scalar let foo2 = { (x: SIMD4, y: Float) -> SIMD4 in return x + y } - let bp2 = pullback(at: a, 5, in: foo2) + let (val2, bp2) = valueWithPullback(at: a, 5, in: foo2) + expectEqual(SIMD4(6, 7, 8, 9), val2) expectEqual((g, 4), bp2(g)) // Scalar + SIMD let foo3 = { (x: SIMD4, y: Float) -> SIMD4 in return y + x } - let bp3 = pullback(at: a, 5, in: foo3) + let (val3, bp3) = valueWithPullback(at: a, 5, in: foo3) + expectEqual(SIMD4(6, 7, 8, 9), val3) expectEqual((g, 4), bp3(g)) } +SIMDTests.test("Subtraction") { + let a = SIMD4(1, 2, 3, 4) + let g = SIMD4(1, 1, 1, 1) + + // SIMD - SIMD + let foo1 = { (x: SIMD4, 
y: SIMD4) -> SIMD4 in + return x - y + } + let (val1, bp1) = valueWithPullback(at: a, a, in: foo1) + expectEqual(SIMD4(0, 0, 0, 0), val1) + expectEqual((g, -g), bp1(g)) + + // SIMD - Scalar + let foo2 = { (x: SIMD4, y: Float) -> SIMD4 in + return x - y + } + let (val2, bp2) = valueWithPullback(at: a, 5, in: foo2) + expectEqual(SIMD4(-4, -3, -2, -1), val2) + expectEqual((g, -4), bp2(g)) + + // Scalar - SIMD + let foo3 = { (x: SIMD4, y: Float) -> SIMD4 in + return y - x + } + let (val3, bp3) = valueWithPullback(at: a, 5, in: foo3) + expectEqual(SIMD4(4, 3, 2, 1), val3) + expectEqual((-g, 4), bp3(g)) +} + SIMDTests.test("Multiplication") { let a = SIMD4(1, 2, 3, 4) let g = SIMD4(1, 1, 1, 1) @@ -84,21 +135,24 @@ SIMDTests.test("Multiplication") { let foo1 = { (x: SIMD4, y: SIMD4) -> SIMD4 in return x * y } - let bp1 = pullback(at: a, a, in: foo1) + let (val1, bp1) = valueWithPullback(at: a, a, in: foo1) + expectEqual(a * a, val1) expectEqual((a, a), bp1(g)) // SIMD * Scalar let foo2 = { (x: SIMD4, y: Float) -> SIMD4 in return x * y } - let bp2 = pullback(at: a, 5, in: foo2) + let (val2, bp2) = valueWithPullback(at: a, 5, in: foo2) + expectEqual(a * 5, val2) expectEqual((SIMD4(5, 5, 5, 5), 10), bp2(g)) // Scalar * SIMD let foo3 = { (x: SIMD4, y: Float) -> SIMD4 in return y * x } - let bp3 = pullback(at: a, 5, in: foo3) + let (val3, bp3) = valueWithPullback(at: a, 5, in: foo3) + expectEqual(a * 5, val3) expectEqual((SIMD4(5, 5, 5, 5), 10), bp3(g)) } @@ -110,18 +164,20 @@ SIMDTests.test("Division") { let foo1 = { (x: SIMD4, y: SIMD4) -> SIMD4 in return x / y } - let bp1 = pullback(at: a, a, in: foo1) let dlhs1 = g / a let drhs1 = -1 / a + let (val1, bp1) = valueWithPullback(at: a, a, in: foo1) + expectEqual(a / a, val1) expectEqual((dlhs1, drhs1), bp1(g)) // SIMD / Scalar let foo2 = { (x: SIMD4, y: Float) -> SIMD4 in return x / y } - let bp2 = pullback(at: a, 5, in: foo2) let dlhs2 = g / 5 let drhs2 = (-a / 25 * g).sum() + let (val2, bp2) = valueWithPullback(at: a, 5, in: 
foo2) + expectEqual(a / 5, val2) expectEqual((dlhs2, drhs2), bp2(g)) // Scalar / SIMD @@ -130,7 +186,8 @@ SIMDTests.test("Division") { } let dlhs3 = (g / a).sum() let drhs3 = -5 / (a*a) * g - let bp3 = pullback(at: 5, a, in: foo3) + let (val3, bp3) = valueWithPullback(at: 5, a, in: foo3) + expectEqual(5 / a, val3) expectEqual((dlhs3, drhs3), bp3(g)) } From 0adb6eb22f53f3d015f1649cfd6eeab06601ee34 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Thu, 13 Jun 2019 08:56:57 -0700 Subject: [PATCH 25/26] White space. --- stdlib/public/core/SIMDVector.swift | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/stdlib/public/core/SIMDVector.swift b/stdlib/public/core/SIMDVector.swift index d6933e40868bb..3089fa915a4f8 100644 --- a/stdlib/public/core/SIMDVector.swift +++ b/stdlib/public/core/SIMDVector.swift @@ -1210,11 +1210,11 @@ extension SIMD where Scalar: FloatingPoint { @_transparent // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpAdd(lhs:rhs:) - where Self : Differentiable, - Self.TangentVector : SIMD, - Scalar : Differentiable & BinaryFloatingPoint, - Scalar.TangentVector : BinaryFloatingPoint, - Self.TangentVector.Scalar == Scalar.TangentVector) + where Self : Differentiable, + Self.TangentVector : SIMD, + Scalar : Differentiable & BinaryFloatingPoint, + Scalar.TangentVector : BinaryFloatingPoint, + Self.TangentVector.Scalar == Scalar.TangentVector) public static func +(lhs: Scalar, rhs: Self) -> Self { return Self(repeating: lhs) + rhs } @@ -1290,7 +1290,7 @@ extension SIMD where Scalar: FloatingPoint { public static func *(lhs: Self, rhs: Scalar) -> Self { return lhs * Self(repeating: rhs) } - + @_transparent // SWIFT_ENABLE_TENSORFLOW @differentiable(vjp: _vjpDivide(lhs:rhs:) @@ -1322,7 +1322,7 @@ extension SIMD where Scalar: FloatingPoint { public static func -=(lhs: inout Self, rhs: Scalar) { lhs = lhs - rhs } - + @_transparent public static func *=(lhs: inout Self, rhs: Scalar) { lhs = lhs * rhs From 
d73f3dab585cb17ea128e215a634bf2c8d6cf985 Mon Sep 17 00:00:00 2001 From: Bart Chrzaszcz Date: Thu, 13 Jun 2019 15:49:45 -0700 Subject: [PATCH 26/26] Add generic tests. --- test/AutoDiff/SIMD.swift | 103 ++++++++++++++++++++++++++++++++++----- 1 file changed, 92 insertions(+), 11 deletions(-) diff --git a/test/AutoDiff/SIMD.swift b/test/AutoDiff/SIMD.swift index 473db4b090d4d..380132a3ab5da 100644 --- a/test/AutoDiff/SIMD.swift +++ b/test/AutoDiff/SIMD.swift @@ -16,8 +16,8 @@ SIMDTests.test("init(repeating:)") { let foo1 = { (x: Float) -> SIMD4 in return SIMD4(repeating: 2 * x) } - let (val, bp1) = valueWithPullback(at: 5, in: foo1) - expectEqual(SIMD4(10, 10, 10, 10), val) + let (val1, bp1) = valueWithPullback(at: 5, in: foo1) + expectEqual(SIMD4(10, 10, 10, 10), val1) expectEqual(8, bp1(g)) } @@ -27,8 +27,8 @@ SIMDTests.test("Sum") { let foo1 = { (x: SIMD4) -> Float in return x.sum() } - let (val, bp1) = valueWithPullback(at: a, in: foo1) - expectEqual(10, val) + let (val1, bp1) = valueWithPullback(at: a, in: foo1) + expectEqual(10, val1) expectEqual(SIMD4(3, 3, 3, 3), bp1(3)) } @@ -39,8 +39,8 @@ SIMDTests.test("Identity") { let foo1 = { (x: SIMD4) -> SIMD4 in return x } - let (val, bp1) = valueWithPullback(at: a, in: foo1) - expectEqual(a, val) + let (val1, bp1) = valueWithPullback(at: a, in: foo1) + expectEqual(a, val1) expectEqual(g, bp1(g)) } @@ -51,21 +51,20 @@ SIMDTests.test("Negate") { let foo1 = { (x: SIMD4) -> SIMD4 in return -x } - let (val, bp1) = valueWithPullback(at: a, in: foo1) - expectEqual(-a, val) + let (val1, bp1) = valueWithPullback(at: a, in: foo1) + expectEqual(-a, val1) expectEqual(-g, bp1(g)) } SIMDTests.test("subscript") { let a = SIMD4(1, 2, 3, 4) - let g = SIMD4(1, 1, 1, 1) let foo1 = { (x: SIMD4) -> Float in return x[3] } - let (val, bp1) = valueWithPullback(at: a, in: foo1) - expectEqual(4, val) + let (val1, bp1) = valueWithPullback(at: a, in: foo1) + expectEqual(4, val1) expectEqual(SIMD4(0, 0, 0, 7), bp1(7)) } @@ -191,4 +190,86 @@ 
SIMDTests.test("Division") { expectEqual((dlhs3, drhs3), bp3(g)) } +SIMDTests.test("Generics") { + let a = SIMD3(1, 2, 3) + let g = SIMD3(1, 1, 1) + + func testInit(x: Scalar) -> SIMDType + where SIMDType.Scalar == Scalar, + SIMDType : Differentiable, + Scalar : BinaryFloatingPoint & Differentiable, + SIMDType.TangentVector == SIMDType, + Scalar.TangentVector == Scalar { + return SIMDType.init(repeating: x) + } + func simd3Init(x: Double) -> SIMD3 { testInit(x: x) } + let (val1, bp1) = valueWithPullback(at: 10, in: simd3Init) + expectEqual(SIMD3(10, 10, 10), val1) + expectEqual(3, bp1(g)) + + // SIMDType + SIMDType + func testAddition(lhs: SIMDType, rhs: SIMDType) + -> SIMDType + where SIMDType.Scalar == Scalar, + SIMDType : Differentiable, + SIMDType.TangentVector : SIMD, + Scalar : BinaryFloatingPoint, + SIMDType.TangentVector.Scalar : BinaryFloatingPoint { + return lhs + rhs + } + func simd3Add(lhs: SIMD3, rhs: SIMD3) -> SIMD3 { + return testAddition(lhs: lhs, rhs: rhs) + } + let (val2, bp2) = valueWithPullback(at: a, a, in: simd3Add) + expectEqual(SIMD3(2, 4, 6), val2) + expectEqual((g, g), bp2(g)) + + // Scalar - SIMDType + func testSubtraction(lhs: Scalar, rhs: SIMDType) + -> SIMDType + where SIMDType.Scalar == Scalar, + SIMDType : Differentiable, + Scalar : BinaryFloatingPoint & Differentiable, + SIMDType.TangentVector == SIMDType, + Scalar.TangentVector == Scalar { + return lhs - rhs + } + func simd3Subtract(lhs: Double, rhs: SIMD3) -> SIMD3 { + return testSubtraction(lhs: lhs, rhs: rhs) + } + let (val3, bp3) = valueWithPullback(at: 5, a, in: simd3Subtract) + expectEqual(SIMD3(4, 3, 2), val3) + expectEqual((3, SIMD3(-1, -1, -1)), bp3(g)) + + // SIMDType * Scalar + func testMultipication(lhs: SIMDType, rhs: Scalar) + -> SIMDType + where SIMDType.Scalar == Scalar, + SIMDType : Differentiable, + Scalar : BinaryFloatingPoint & Differentiable, + SIMDType.TangentVector == SIMDType, + Scalar.TangentVector == Scalar { + return lhs * rhs + } + func 
simd3Multiply(lhs: SIMD3, rhs: Double) -> SIMD3 { + return testMultipication(lhs: lhs, rhs: rhs) + } + let (val4, bp4) = valueWithPullback(at: a, 5, in: simd3Multiply) + expectEqual(SIMD3(5, 10, 15), val4) + expectEqual((SIMD3(5, 5, 5), 6), bp4(g)) + + func testSum(x: SIMDType) -> Scalar + where SIMDType.Scalar == Scalar, + SIMDType : Differentiable, + Scalar : BinaryFloatingPoint & Differentiable, + Scalar.TangentVector : BinaryFloatingPoint, + SIMDType.TangentVector == SIMDType { + return x.sum() + } + func simd3Sum(x: SIMD3) -> Double { testSum(x: x) } + let (val5, bp5) = valueWithPullback(at: a, in: simd3Sum) + expectEqual(6, val5) + expectEqual(SIMD3(7, 7, 7), bp5(7)) +} + runAllTests()