diff --git a/stdlib/public/TensorFlow/CMakeLists.txt b/stdlib/public/TensorFlow/CMakeLists.txt index 0256cff8bb57e..16c15059abfb2 100644 --- a/stdlib/public/TensorFlow/CMakeLists.txt +++ b/stdlib/public/TensorFlow/CMakeLists.txt @@ -37,6 +37,7 @@ set(SOURCES CompositeMath.swift Dataset.swift DataTypes.swift + Execution.swift Gradients.swift Ops.swift ShapedArray.swift diff --git a/stdlib/public/TensorFlow/CompilerRuntime.swift b/stdlib/public/TensorFlow/CompilerRuntime.swift index 917d265aa4d9b..61814660cfe5f 100644 --- a/stdlib/public/TensorFlow/CompilerRuntime.swift +++ b/stdlib/public/TensorFlow/CompilerRuntime.swift @@ -11,6 +11,10 @@ //===----------------------------------------------------------------------===// // // This file defines the Swift runtime support for TensorFlow computation. +// +// This file should only contain internal details: runtime-related public APIs +// should be defined in `Execution.swift`. +// // Design notes on TF eager based runtime: // // 1. A global context (`_ExecutionContext.global`) is used to manage all tensor @@ -41,32 +45,6 @@ import Glibc #endif import CTensorFlow -// If `serverAddress` is nil, use local session (good for forge testing). -// -// FIXME: We need transparent here because deabstraction isn't inlining this -// function. We need to inline if a callee contains tensor ops, not only if -// it takes and returns a TensorFlow value. -@_transparent -public func enableTPU(serverAddress: String? = nil, infeed: Bool = true) { - _RuntimeConfig.executionMode = .tpu - if let serverAddress = serverAddress { - _RuntimeConfig.session = .remote(serverDef: serverAddress) - } - #tfop("tfc.configureTPU", enableInfeed: infeed) as Void -} - -// FIXME: Extend the interface to support multiple GPU devices, and unify it -// with enableTPU() above. 
-@_transparent -public func enableGPU() { - #tfop("tfc.configureGPU") as Void -} - -@_transparent -public func enableCPU() { - #tfop("tfc.configureCPU") as Void -} - // @_frozen // SR-9739 public enum _ExecutionMode : Equatable { /// CPU or GPU execution. @@ -1598,7 +1576,7 @@ public func _TFCStartTensorComputation( /// GPU. /// - tensorResultCount: The number of results to accept from the computation. /// - Note: The result address as passed in is pointing to uninitialized memory, -/// this must initialize the memory, transfering ownership of the tensor +/// this must initialize the memory, transferring ownership of the tensor /// handles to the caller. @inlinable @_silgen_name("_swift_tfc_FinishTensorComputation") @@ -2047,39 +2025,6 @@ class _ThreadLocalState { } } -/// Executes a closure, making TensorFlow operations run on a specific kind of -/// device. -/// -/// - Parameters: -/// - kind: A kind of device to run TensorFlow operations on. -/// - index: The device to run the ops on. -/// - body: A closure whose TensorFlow operations are to be executed on the -/// specified kind of device. -// Use inline never to ensure correctness in scoped device placement. See -// https://bugs.swift.org/browse/SR-9535 for more context. -@inline(never) -public func withDevice<R>(_ kind: DeviceKind, _ index: UInt = 0, - perform body: () throws -> R) rethrows -> R { - _ThreadLocalState.value.pushDevice((kind, index)) - let result = try body() - _ThreadLocalState.value.popDevice() - return result -} - -/// Executes a closure, allowing TensorFlow to place TensorFlow operations on -/// any device. This should restore the default placement behavior. -/// -/// - Parameters: -/// - body: A closure whose TensorFlow operations are to be executed on the -/// specified kind of device. 
-@inline(never) -public func withDefaultDevice<R>(perform body: () throws -> R) rethrows -> R { - _ThreadLocalState.value.pushDevice(nil) - let result = try body() - _ThreadLocalState.value.popDevice() - return result -} - @usableFromInline @_cdecl("_swift_tfc_OpSetDeviceFromScope") func _TFCOpSetDeviceFromScope(_ op: CTFEOp, _ status: CTFStatus) { diff --git a/stdlib/public/TensorFlow/Execution.swift b/stdlib/public/TensorFlow/Execution.swift new file mode 100644 index 0000000000000..9650b449b06e8 --- /dev/null +++ b/stdlib/public/TensorFlow/Execution.swift @@ -0,0 +1,74 @@ +//===-- Execution.swift ---------------------------------------*- swift -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// This file defines public APIs regarding TensorFlow runtime support. +// +//===----------------------------------------------------------------------===// + +// If `serverAddress` is nil, use local session (good for forge testing). +// +// FIXME: We need transparent here because deabstraction isn't inlining this +// function. We need to inline if a callee contains tensor ops, not only if +// it takes and returns a TensorFlow value. +@_transparent +public func enableTPU(serverAddress: String? = nil, infeed: Bool = true) { + _RuntimeConfig.executionMode = .tpu + if let serverAddress = serverAddress { + _RuntimeConfig.session = .remote(serverDef: serverAddress) + } + #tfop("tfc.configureTPU", enableInfeed: infeed) as Void +} + +// FIXME: Extend the interface to support multiple GPU devices, and unify it +// with enableTPU() above. 
+@_transparent +public func enableGPU() { + #tfop("tfc.configureGPU") as Void +} + +@_transparent +public func enableCPU() { + #tfop("tfc.configureCPU") as Void +} + +/// Executes a closure, making TensorFlow operations run on a specific kind of +/// device. +/// +/// - Parameters: +/// - kind: A kind of device to run TensorFlow operations on. +/// - index: The device to run the ops on. +/// - body: A closure whose TensorFlow operations are to be executed on the +/// specified kind of device. +// Use inline never to ensure correctness in scoped device placement. See +// https://bugs.swift.org/browse/SR-9535 for more context. +@inline(never) +public func withDevice<R>(_ kind: DeviceKind, _ index: UInt = 0, + perform body: () throws -> R) rethrows -> R { + _ThreadLocalState.value.pushDevice((kind, index)) + let result = try body() + _ThreadLocalState.value.popDevice() + return result +} + +/// Executes a closure, allowing TensorFlow to place TensorFlow operations on +/// any device. This should restore the default placement behavior. +/// +/// - Parameters: +/// - body: A closure whose TensorFlow operations are to be executed on the +/// specified kind of device. +@inline(never) +public func withDefaultDevice<R>(perform body: () throws -> R) rethrows -> R { + _ThreadLocalState.value.pushDevice(nil) + let result = try body() + _ThreadLocalState.value.popDevice() + return result +}