Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions stdlib/public/TensorFlow/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ set(SOURCES
CompositeMath.swift
Dataset.swift
DataTypes.swift
Execution.swift
Gradients.swift
Ops.swift
ShapedArray.swift
Expand Down
65 changes: 5 additions & 60 deletions stdlib/public/TensorFlow/CompilerRuntime.swift
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,10 @@
//===----------------------------------------------------------------------===//
//
// This file defines the Swift runtime support for TensorFlow computation.
//
// This file should only contain internal details: runtime-related public APIs
// should be defined in `Execution.swift`.
//
// Design notes on TF eager based runtime:
//
// 1. A global context (`_ExecutionContext.global`) is used to manage all tensor
Expand Down Expand Up @@ -41,32 +45,6 @@ import Glibc
#endif
import CTensorFlow

// If `serverAddress` is nil, use local session (good for forge testing).
//
// FIXME: We need transparent here because deabstraction isn't inlining this
// function. We need to inline if a callee contains tensor ops, not only if
// it takes and returns a TensorFlow value.
@_transparent
public func enableTPU(serverAddress: String? = nil, infeed: Bool = true) {
_RuntimeConfig.executionMode = .tpu
if let serverAddress = serverAddress {
_RuntimeConfig.session = .remote(serverDef: serverAddress)
}
#tfop("tfc.configureTPU", enableInfeed: infeed) as Void
}

// FIXME: Extend the interface to support multiple GPU devices, and unify it
// with enableTPU() above.
@_transparent
public func enableGPU() {
#tfop("tfc.configureGPU") as Void
}

@_transparent
public func enableCPU() {
#tfop("tfc.configureCPU") as Void
}

// @_frozen // SR-9739
public enum _ExecutionMode : Equatable {
/// CPU or GPU execution.
Expand Down Expand Up @@ -1598,7 +1576,7 @@ public func _TFCStartTensorComputation(
/// GPU.
/// - tensorResultCount: The number of results to accept from the computation.
/// - Note: The result address as passed in is pointing to uninitialized memory,
/// this must initialize the memory, transfering ownership of the tensor
/// this must initialize the memory, transferring ownership of the tensor
/// handles to the caller.
@inlinable
@_silgen_name("_swift_tfc_FinishTensorComputation")
Expand Down Expand Up @@ -2047,39 +2025,6 @@ class _ThreadLocalState {
}
}

/// Executes a closure, making TensorFlow operations run on a specific kind of
/// device.
///
/// - Parameters:
/// - kind: A kind of device to run TensorFlow operations on.
/// - index: The device to run the ops on.
/// - body: A closure whose TensorFlow operations are to be executed on the
/// specified kind of device.
// Use inline never to ensure correctness in scoped device placement. See
// https://bugs.swift.org/browse/SR-9535 for more context.
@inline(never)
public func withDevice<R>(_ kind: DeviceKind, _ index: UInt = 0,
perform body: () throws -> R) rethrows -> R {
_ThreadLocalState.value.pushDevice((kind, index))
let result = try body()
_ThreadLocalState.value.popDevice()
return result
}

/// Executes a closure, allowing TensorFlow to place TensorFlow operations on
/// any device. This should restore the default placement behavior.
///
/// - Parameters:
/// - body: A closure whose TensorFlow operations are to be executed on the
/// specified kind of device.
@inline(never)
public func withDefaultDevice<R>(perform body: () throws -> R) rethrows -> R {
_ThreadLocalState.value.pushDevice(nil)
let result = try body()
_ThreadLocalState.value.popDevice()
return result
}

@usableFromInline
@_cdecl("_swift_tfc_OpSetDeviceFromScope")
func _TFCOpSetDeviceFromScope(_ op: CTFEOp, _ status: CTFStatus) {
Expand Down
74 changes: 74 additions & 0 deletions stdlib/public/TensorFlow/Execution.swift
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
//===-- Execution.swift ---------------------------------------*- swift -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file defines public APIs regarding TensorFlow runtime support.
//
//===----------------------------------------------------------------------===//

// If `serverAddress` is nil, use local session (good for forge testing).
//
// FIXME: We need transparent here because deabstraction isn't inlining this
// function. We need to inline if a callee contains tensor ops, not only if
// it takes and returns a TensorFlow value.
/// Configures subsequent TensorFlow computation to run on TPU.
///
/// Sets the global execution mode to `.tpu` and, when a server address is
/// given, points the runtime at a remote session before issuing the
/// `tfc.configureTPU` op.
///
/// - Parameters:
///   - serverAddress: Address of a remote session. If `nil`, a local session
///     is used.
///   - infeed: Whether to enable TPU infeed (passed through to the
///     `tfc.configureTPU` op as `enableInfeed`).
@_transparent
public func enableTPU(serverAddress: String? = nil, infeed: Bool = true) {
  _RuntimeConfig.executionMode = .tpu
  if let serverAddress = serverAddress {
    _RuntimeConfig.session = .remote(serverDef: serverAddress)
  }
  // `#tfop` is compiler-intrinsic syntax; the result is discarded via `as Void`.
  #tfop("tfc.configureTPU", enableInfeed: infeed) as Void
}

// FIXME: Extend the interface to support multiple GPU devices, and unify it
// with enableTPU() above.
/// Configures subsequent TensorFlow computation to run on GPU by issuing the
/// `tfc.configureGPU` op.
// NOTE(review): unlike `enableTPU`, this does not touch `_RuntimeConfig` —
// presumably the op alone carries the configuration; confirm before unifying.
@_transparent
public func enableGPU() {
  #tfop("tfc.configureGPU") as Void
}

/// Configures subsequent TensorFlow computation to run on CPU by issuing the
/// `tfc.configureCPU` op.
@_transparent
public func enableCPU() {
  #tfop("tfc.configureCPU") as Void
}

/// Executes a closure, making TensorFlow operations run on a specific kind of
/// device.
///
/// - Parameters:
///   - kind: A kind of device to run TensorFlow operations on.
///   - index: The device to run the ops on.
///   - body: A closure whose TensorFlow operations are to be executed on the
///     specified kind of device.
/// - Returns: The value returned by `body`.
/// - Throws: Rethrows whatever `body` throws.
// Use inline never to ensure correctness in scoped device placement. See
// https://bugs.swift.org/browse/SR-9535 for more context.
@inline(never)
public func withDevice<R>(_ kind: DeviceKind, _ index: UInt = 0,
                          perform body: () throws -> R) rethrows -> R {
  _ThreadLocalState.value.pushDevice((kind, index))
  // `defer` guarantees the pushed device scope is popped even when `body`
  // throws. The previous `let result = try body(); popDevice()` sequence
  // skipped the pop on a thrown error, leaking the scope entry on the
  // thread-local stack and corrupting placement for later calls.
  defer { _ThreadLocalState.value.popDevice() }
  return try body()
}

/// Executes a closure, allowing TensorFlow to place TensorFlow operations on
/// any device. This should restore the default placement behavior.
///
/// - Parameter body: A closure whose TensorFlow operations are to be executed
///   with default device placement.
/// - Returns: The value returned by `body`.
/// - Throws: Rethrows whatever `body` throws.
// Use inline never to ensure correctness in scoped device placement, matching
// `withDevice(_:_:perform:)`.
@inline(never)
public func withDefaultDevice<R>(perform body: () throws -> R) rethrows -> R {
  _ThreadLocalState.value.pushDevice(nil)
  // `defer` ensures the `nil` (default-placement) scope is popped even when
  // `body` throws; the previous sequence leaked the scope entry on error.
  defer { _ThreadLocalState.value.popDevice() }
  return try body()
}