From fd2eb5f022278e136f517778c1e12b4671632021 Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Tue, 22 Nov 2022 10:13:08 +0900 Subject: [PATCH 1/8] [Concurrency] Optimize Void task group, to not store completed tasks anymore --- .../BackDeployConcurrency/TaskGroup.cpp | 99 ++++++++++++++----- .../BackDeployConcurrency/TaskGroup.swift | 1 + stdlib/public/Concurrency/TaskGroup.cpp | 91 +++++++++++++++-- stdlib/public/Concurrency/TaskGroup.swift | 30 +++++- ...c_taskgroup_void_neverConsumingTasks.swift | 76 ++++++++++++++ 5 files changed, 264 insertions(+), 33 deletions(-) create mode 100644 test/Concurrency/Runtime/async_taskgroup_void_neverConsumingTasks.swift diff --git a/stdlib/public/BackDeployConcurrency/TaskGroup.cpp b/stdlib/public/BackDeployConcurrency/TaskGroup.cpp index 359d2d8603dcb..acdc25e8529df 100644 --- a/stdlib/public/BackDeployConcurrency/TaskGroup.cpp +++ b/stdlib/public/BackDeployConcurrency/TaskGroup.cpp @@ -133,6 +133,15 @@ class TaskGroupImpl: public TaskGroupTaskStatusRecord { /*task*/ asyncTask }; } + + static PollResult getVoid() { + return PollResult{ + /*status*/ PollStatus::Success, + /*storage*/ nullptr, + /*successType*/nullptr, // TODO: Void.self + /*task*/ nullptr + }; + } }; /// An item within the message queue of a group. @@ -555,13 +564,37 @@ static void fillGroupNextResult(TaskFutureWaitAsyncContext *context, } } +static void fillGroupNextVoidResult(TaskFutureWaitAsyncContext *context, + PollResult result) { + /// Fill in the result value + switch (result.status) { + case PollStatus::MustWait: + assert(false && "filling a waiting status?"); + return; + + case PollStatus::Error: { + assert(false && "this type of task group cannot throw"); + return; + } + + case PollStatus::Success: + case PollStatus::Empty: { + // "Success" type is guaranteed to be Void + // Initialize the result as a nil Optional. 
+ const Metadata *successType = result.successType; + OpaqueValue *destPtr = context->successResultPointer; + successType->vw_storeEnumTagSinglePayload(destPtr, 1, 1); + return; + } + } +} + void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { assert(completedTask); assert(completedTask->isFuture()); assert(completedTask->hasChildFragment()); assert(completedTask->hasGroupChildFragment()); assert(completedTask->groupChildFragment()->getGroup() == asAbstract(this)); - SWIFT_TASK_DEBUG_LOG("offer task %p to group %p", completedTask, this); mutex.lock(); // TODO: remove fragment lock, and use status for synchronization @@ -572,6 +605,7 @@ void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // W:n R:0 P:1 -> W:y R:1 P:1 // complete immediately // W:n R:0 P:1 -> W:y R:1 P:3 // complete immediately, 2 more pending tasks auto assumed = statusAddReadyAssumeAcquire(); + SWIFT_TASK_DEBUG_LOG("offer task %p to group %p, tasks pending = %d", completedTask, assumed.pendingTasks()); auto asyncContextPrefix = reinterpret_cast( reinterpret_cast(context) - sizeof(FutureAsyncContextPrefix)); @@ -607,7 +641,13 @@ void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { static_cast( waitingTask->ResumeContext); - fillGroupNextResult(waitingContext, result); + if (this->eagerlyReleaseCompleteTasks) { + fprintf(stderr, "[%s:%d](%s) offer: eagerlyReleaseCompleteTasks\n", __FILE_NAME__, __LINE__, __FUNCTION__); + fillGroupNextResult(waitingContext, result); + } else { + fprintf(stderr, "[%s:%d](%s) offer: NOT\n", __FILE_NAME__, __LINE__, __FUNCTION__); + fillGroupNextResult(waitingContext, result); + } detachChild(result.retainedTask); _swift_tsan_acquire(static_cast(waitingTask)); @@ -627,20 +667,31 @@ void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // queue when a task polls during next() it will notice that we have a value // ready for it, and will process it immediately without suspending. assert(!waitQueue.load(std::memory_order_relaxed)); - SWIFT_TASK_DEBUG_LOG("group has no waiting tasks, RETAIN and store ready task = %p", - completedTask); - // Retain the task while it is in the queue; - // it must remain alive until the task group is alive. - swift_retain(completedTask); - - auto readyItem = ReadyQueueItem::get( - hadErrorResult ? ReadyStatus::Error : ReadyStatus::Success, - completedTask - ); + if (!this->eagerlyReleaseCompleteTasks) { + SWIFT_TASK_DEBUG_LOG("group has no waiting tasks, RETAIN and store ready task = %p", + completedTask); + // Retain the task while it is in the queue; + // it must remain alive until the task group is alive. + swift_retain(completedTask); + + auto readyItem = ReadyQueueItem::get( + hadErrorResult ? ReadyStatus::Error : ReadyStatus::Success, + completedTask + ); + + assert(completedTask == readyItem.getTask()); + assert(readyItem.getTask()->isFuture()); + readyQueue.enqueue(readyItem); + } else { + assert(this->eagerlyReleaseCompleteTasks); + // DO NOT retain the task; and do not store the value in the readyQueue at all (!) + // + // In the "eagerlyRelease" completed tasks mode, we are guaranteed that tasks are of Void type, + // and thus there is no necessity to store values, because we can always "make them up" when polled. + // From the user's perspective, it is indistinguishable if they received the "real value" or one we "made up", + // because Void is always the same, and cannot be examined in any way to determine if it was the "actual" Void or not. 
+ } - assert(completedTask == readyItem.getTask()); - assert(readyItem.getTask()->isFuture()); - readyQueue.enqueue(readyItem); mutex.unlock(); // TODO: remove fragment lock, and use status for synchronization return; } @@ -698,7 +749,7 @@ static void swift_taskGroup_wait_next_throwingImpl( PollResult polled = group->poll(waitingTask); switch (polled.status) { case PollStatus::MustWait: - SWIFT_TASK_DEBUG_LOG("poll group = %p, no ready tasks, waiting task = %p", + SWIFT_TASK_DEBUG_LOG("poll group = %p, tasks ready = 0, waiting task = %p", group, waitingTask); // The waiting task has been queued on the channel, // there were pending tasks so it will be woken up eventually. @@ -714,13 +765,17 @@ static void swift_taskGroup_wait_next_throwingImpl( case PollStatus::Success: SWIFT_TASK_DEBUG_LOG("poll group = %p, task = %p, ready task available = %p", group, waitingTask, polled.retainedTask); - fillGroupNextResult(context, polled); + if (this->eagerlyReleaseCompleteTasks) { + fillGroupNextVoidResult(context, polled); + } else { + fillGroupNextResult(context, polled); + } + if (auto completedTask = polled.retainedTask) { // it would be null for PollStatus::Empty, then we don't need to release - group->detachChild(polled.retainedTask); - swift_release(polled.retainedTask); + group->detachChild(completedTask); + swift_release(completedTask); } - return waitingTask->runInFullyEstablishedContext(); } } @@ -755,8 +810,8 @@ PollResult TaskGroupImpl::poll(AsyncTask *waitingTask) { // ==== 2) Ready task was polled, return with it immediately ----------------- if (assumed.readyTasks()) { - SWIFT_TASK_DEBUG_LOG("poll group = %p, group has ready tasks = %d", - this, assumed.readyTasks()); + SWIFT_TASK_DEBUG_LOG("poll group = %p, tasks ready=%d, pending=%d", + this, assumed.readyTasks(), assumed.pendingTasks()); auto assumedStatus = assumed.status; auto newStatus = TaskGroupImpl::GroupStatus{assumedStatus}; diff --git a/stdlib/public/BackDeployConcurrency/TaskGroup.swift b/stdlib/public/BackDeployConcurrency/TaskGroup.swift index 6955c5ee52552..55e9fca22f6a9 100644 --- a/stdlib/public/BackDeployConcurrency/TaskGroup.swift +++ b/stdlib/public/BackDeployConcurrency/TaskGroup.swift @@ -464,6 +464,7 @@ public struct ThrowingTaskGroup { } } + // TODO(ktoso): doesn't seem to be used? @usableFromInline internal mutating func _waitForAll() async throws { while let _ = try await next() { } diff --git a/stdlib/public/Concurrency/TaskGroup.cpp b/stdlib/public/Concurrency/TaskGroup.cpp index e9cd764acbdd6..ac1c3c1d4f214 100644 --- a/stdlib/public/Concurrency/TaskGroup.cpp +++ b/stdlib/public/Concurrency/TaskGroup.cpp @@ -137,6 +137,15 @@ class TaskGroupImpl: public TaskGroupTaskStatusRecord { /*task*/ asyncTask }; } + + static PollResult getVoid() { + return PollResult{ + /*status*/ PollStatus::Empty, + /*storage*/ nullptr, + /*successType*/nullptr, // TODO: Void.self + /*task*/ nullptr + }; + } }; /// An item within the message queue of a group. 
@@ -323,11 +332,14 @@ class TaskGroupImpl: public TaskGroupTaskStatusRecord { friend class ::swift::AsyncTask; public: - explicit TaskGroupImpl(const Metadata *T) + const bool eagerlyReleaseCompleteTasks; + explicit TaskGroupImpl(const Metadata *T, bool eagerlyReleaseCompleteTasks) : TaskGroupTaskStatusRecord(), status(GroupStatus::initial().status), readyQueue(), - waitQueue(nullptr), successType(T) {} + waitQueue(nullptr), + successType(T), + eagerlyReleaseCompleteTasks(eagerlyReleaseCompleteTasks) {} TaskGroupTaskStatusRecord *getTaskRecord() { return reinterpret_cast(this); @@ -489,7 +501,7 @@ SWIFT_CC(swift) static void swift_taskGroup_initializeImpl(TaskGroup *group, const Metadata *T) { SWIFT_TASK_DEBUG_LOG("creating task group = %p", group); - TaskGroupImpl *impl = ::new (group) TaskGroupImpl(T); + TaskGroupImpl *impl = ::new (group) TaskGroupImpl(T, /*eagerlyReleaseCompleteTasks=*/true); auto record = impl->getTaskRecord(); assert(impl == record && "the group IS the task record"); @@ -545,6 +557,12 @@ static void swift_taskGroup_destroyImpl(TaskGroup *group) { void TaskGroupImpl::destroy() { SWIFT_TASK_DEBUG_LOG("destroying task group = %p", this); + if (!this->isEmpty()) { + auto status = this->statusLoadRelaxed(); + SWIFT_TASK_DEBUG_LOG("destroying task group = %p, tasks .ready = %d, .pending = %d", + this, status.readyTasks(), status.pendingTasks()); + } + assert(this->isEmpty() && "Attempted to destroy non-empty task group!"); // First, remove the group from the task and deallocate the record removeStatusRecord(getTaskRecord()); @@ -585,6 +603,29 @@ static void fillGroupNextResult(TaskFutureWaitAsyncContext *context, // Initialize the result as an Optional. const Metadata *successType = result.successType; OpaqueValue *destPtr = context->successResultPointer; + successType->vw_storeEnumTagSinglePayload(destPtr, 1, 1); + return; + } + } +} + +static void fillGroupNextVoidResult(TaskFutureWaitAsyncContext *context, + PollResult result) { + /// Fill in the result value + switch (result.status) { + case PollStatus::MustWait: + assert(false && "filling a waiting status?"); + return; + + case PollStatus::Error: { + assert(false && "cannot have errors"); + return; + } + + case PollStatus::Success: { + // Initialize the result as an Optional. + const Metadata *successType = result.successType; + OpaqueValue *destPtr = context->successResultPointer; // TODO: figure out a way to try to optimistically take the // value out of the finished task's future, if there are no // remaining references to it. @@ -605,6 +646,16 @@ static void fillGroupNextResult(TaskFutureWaitAsyncContext *context, // TaskGroup is locked upon entry and exit void TaskGroupImpl::enqueueCompletedTask(AsyncTask *completedTask, bool hadErrorResult) { + if (this->eagerlyReleaseCompleteTasks) { + SWIFT_TASK_DEBUG_LOG("group has no waiting tasks, eager release mode; release result task = %p", + completedTask); + // DO NOT RETAIN THE TASK. + // We know it is Void, so we don't need to store the result; + // By releasing tasks eagerly we're able to keep "infinite" task groups, + // running, that never consume their values. Even more-so, + return; + } + // Retain the task while it is in the queue; it must remain alive until // it is found by poll. This retain will balanced by the release in poll. 
swift_retain(completedTask); @@ -646,6 +697,9 @@ void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // W:n R:0 P:1 -> W:y R:1 P:3 // complete immediately, 2 more pending tasks auto assumed = statusAddReadyAssumeAcquire(); + SWIFT_TASK_DEBUG_LOG("group %p, ready: %d, pending: %d", + this, assumed.readyTasks(), assumed.pendingTasks()); + auto asyncContextPrefix = reinterpret_cast( reinterpret_cast(context) - sizeof(FutureAsyncContextPrefix)); bool hadErrorResult = false; @@ -732,8 +786,6 @@ void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // ready for it, and will process it immediately without suspending. assert(!waitQueue.load(std::memory_order_relaxed)); - SWIFT_TASK_DEBUG_LOG("group has no waiting tasks, RETAIN and store ready task = %p", - completedTask); enqueueCompletedTask(completedTask, hadErrorResult); unlock(); // TODO: remove fragment lock, and use status for synchronization } @@ -810,7 +862,11 @@ static void swift_taskGroup_wait_next_throwingImpl( case PollStatus::Success: SWIFT_TASK_DEBUG_LOG("poll group = %p, task = %p, ready task available = %p", group, waitingTask, polled.retainedTask); - fillGroupNextResult(context, polled); + if (group->eagerlyReleaseCompleteTasks) { + fillGroupNextVoidResult(context, polled); + } else { + fillGroupNextResult(context, polled); + } if (auto completedTask = polled.retainedTask) { // Remove the child from the task group's running tasks list. _swift_taskGroup_detachChild(asAbstract(group), completedTask); @@ -859,8 +915,8 @@ reevaluate_if_taskgroup_has_results:; // ==== 2) Ready task was polled, return with it immediately ----------------- if (assumed.readyTasks()) { - SWIFT_TASK_DEBUG_LOG("poll group = %p, group has ready tasks = %d", - this, assumed.readyTasks()); + SWIFT_TASK_DEBUG_LOG("poll group = %p, tasks .ready = %d, .pending = %d", + this, assumed.readyTasks(), assumed.pendingTasks()); auto assumedStatus = assumed.status; auto newStatus = TaskGroupImpl::GroupStatus{assumedStatus}; @@ -877,6 +933,17 @@ reevaluate_if_taskgroup_has_results:; // Success! We are allowed to poll. ReadyQueueItem item; + if (this->eagerlyReleaseCompleteTasks) { + SWIFT_TASK_DEBUG_LOG("poll group = %p; polled in eager-release mode; make up Void value to yield", + this, assumed.readyTasks(), assumed.pendingTasks()); + result.status = PollStatus::Success; + result.storage = nullptr; + result.retainedTask = nullptr; + result.successType = this->successType; + unlock(); // TODO: remove fragment lock, and use status for synchronization + return result; + } + bool taskDequeued = readyQueue.dequeue(item); assert(taskDequeued); (void) taskDequeued; @@ -1035,10 +1102,14 @@ void swift::_swift_taskGroup_cancelAllChildren(TaskGroup *group) { // ============================================================================= // ==== addPending ------------------------------------------------------------- + SWIFT_CC(swift) static bool swift_taskGroup_addPendingImpl(TaskGroup *group, bool unconditionally) { - auto assumedStatus = asImpl(group)->statusAddPendingTaskRelaxed(unconditionally); - return !assumedStatus.isCancelled(); + auto assumed = asImpl(group)->statusAddPendingTaskRelaxed(unconditionally); + SWIFT_TASK_DEBUG_LOG("add pending %s to group %p, tasks pending = %d", + unconditionally ? 
"unconditionally" : "", + group, assumed.pendingTasks()); + return !assumed.isCancelled(); } #define OVERRIDE_TASK_GROUP COMPATIBILITY_OVERRIDE diff --git a/stdlib/public/Concurrency/TaskGroup.swift b/stdlib/public/Concurrency/TaskGroup.swift index 0802ad8b2e59b..b8468e070c7c2 100644 --- a/stdlib/public/Concurrency/TaskGroup.swift +++ b/stdlib/public/Concurrency/TaskGroup.swift @@ -10,8 +10,10 @@ // //===----------------------------------------------------------------------===// + import Swift @_implementationOnly import _SwiftConcurrencyShims +import Darwin // ==== TaskGroup -------------------------------------------------------------- @@ -90,6 +92,32 @@ public func withTaskGroup( #endif } +@available(SwiftStdlib 5.1, *) +@_unsafeInheritExecutor +@inlinable +public func withTaskGroupSuper( + of childTaskResultType: Void.Type = Void.self, + returning returnType: GroupResult.Type = GroupResult.self, + body: (inout TaskGroup) async -> GroupResult +) async -> GroupResult { + #if compiler(>=5.5) && $BuiltinTaskGroupWithArgument + + let _group = Builtin.createTaskGroup(Void.self) + var group = TaskGroup(group: _group) + + // Run the withTaskGroup body. + let result = await body(&group) + + await group.awaitAllRemainingTasks() + + Builtin.destroyTaskGroup(_group) + return result + + #else + fatalError("Swift compiler is incompatible with this SDK version") + #endif +} + /// Starts a new scope that can contain a dynamic number of throwing child tasks. /// /// A group waits for all of its child tasks @@ -556,7 +584,7 @@ public struct ThrowingTaskGroup { @usableFromInline internal mutating func _waitForAll() async throws { - while let _ = try await next() { } + while let _ = try? await next() { } } /// Wait for all of the group's remaining tasks to complete. diff --git a/test/Concurrency/Runtime/async_taskgroup_void_neverConsumingTasks.swift b/test/Concurrency/Runtime/async_taskgroup_void_neverConsumingTasks.swift new file mode 100644 index 0000000000000..987de6486c9fe --- /dev/null +++ b/test/Concurrency/Runtime/async_taskgroup_void_neverConsumingTasks.swift @@ -0,0 +1,76 @@ +// RUN: %target-run-simple-swift( -Xfrontend -disable-availability-checking -parse-as-library) | %FileCheck %s --dump-input=always +// REQUIRES: executable_test +// REQUIRES: concurrency +// REQUIRES: concurrency_runtime +// UNSUPPORTED: back_deployment_runtime +// UNSUPPORTED: OS=linux-gnu +import Darwin + +actor Waiter { + let until: Int + var count: Int + + var cc: CheckedContinuation? + + init(until: Int) { + self.until = until + self.count = 0 + } + + func increment() { + self.count += 1 + fputs("> increment (\(self.count)/\(self.until))\n", stderr); + if self.until <= self.count { + if let cc = self.cc { + cc.resume(returning: self.count) + } + } + } + + func wait() async -> Int { + if self.until <= self.count { + fputs("> RETURN in Waiter\n", stderr); + return self.count + } + + return await withCheckedContinuation { cc in + fputs("> WAIT in Waiter\n", stderr); + self.cc = cc + } + } +} + +@available(SwiftStdlib 5.1, *) +func test_taskGroup_void_neverConsume() async { + let until = 100_000_000 + let waiter = Waiter(until: until) + + let allTasks = await withTaskGroupSuper(of: Void.self, returning: Int.self) { group in + for n in 1...until { + fputs("> enqueue: \(n)\n", stderr); + group.addTask { + fputs("> run: \(n)\n", stderr); + try? 
await Task.sleep(until: .now + .milliseconds(100), clock: .continuous) + await waiter.increment() + } + } + + let void = await next() + + // wait a little bit, so some tasks complete before we hit the implicit "wait at end of task group scope" + try? await Task.sleep(until: .now + .milliseconds(500), clock: .continuous) + + return until + } + + // CHECK: all tasks: 100 + print("all tasks: \(allTasks)") + print("actor: \(allTasks)") +} + +@available(SwiftStdlib 5.1, *) +@main struct Main { + static func main() async { + await test_taskGroup_void_neverConsume() + } +} From 0ec95e18c6c1d4a8b1bbfa1ef3b592d14d34e66b Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Mon, 28 Nov 2022 11:14:33 +0900 Subject: [PATCH 2/8] Revert "[Concurrency] Optimize Void task group, to not store completed tasks anymore" This reverts commit fd2eb5f022278e136f517778c1e12b4671632021. --- .../BackDeployConcurrency/TaskGroup.cpp | 99 +++++-------------- .../BackDeployConcurrency/TaskGroup.swift | 1 - stdlib/public/Concurrency/TaskGroup.cpp | 91 ++--------------- stdlib/public/Concurrency/TaskGroup.swift | 29 +----- ...c_taskgroup_void_neverConsumingTasks.swift | 76 -------------- 5 files changed, 33 insertions(+), 263 deletions(-) delete mode 100644 test/Concurrency/Runtime/async_taskgroup_void_neverConsumingTasks.swift diff --git a/stdlib/public/BackDeployConcurrency/TaskGroup.cpp b/stdlib/public/BackDeployConcurrency/TaskGroup.cpp index acdc25e8529df..359d2d8603dcb 100644 --- a/stdlib/public/BackDeployConcurrency/TaskGroup.cpp +++ b/stdlib/public/BackDeployConcurrency/TaskGroup.cpp @@ -133,15 +133,6 @@ class TaskGroupImpl: public TaskGroupTaskStatusRecord { /*task*/ asyncTask }; } - - static PollResult getVoid() { - return PollResult{ - /*status*/ PollStatus::Success, - /*storage*/ nullptr, - /*successType*/nullptr, // TODO: Void.self - /*task*/ nullptr - }; - } }; /// An item within the message queue of a group. @@ -564,37 +555,13 @@ static void fillGroupNextResult(TaskFutureWaitAsyncContext *context, } } -static void fillGroupNextVoidResult(TaskFutureWaitAsyncContext *context, - PollResult result) { - /// Fill in the result value - switch (result.status) { - case PollStatus::MustWait: - assert(false && "filling a waiting status?"); - return; - - case PollStatus::Error: { - assert(false && "this type of task group cannot throw"); - return; - } - - case PollStatus::Success: - case PollStatus::Empty: { - // "Success" type is guaranteed to be Void - // Initialize the result as a nil Optional. 
- const Metadata *successType = result.successType; - OpaqueValue *destPtr = context->successResultPointer; - successType->vw_storeEnumTagSinglePayload(destPtr, 1, 1); - return; - } - } -} - void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { assert(completedTask); assert(completedTask->isFuture()); assert(completedTask->hasChildFragment()); assert(completedTask->hasGroupChildFragment()); assert(completedTask->groupChildFragment()->getGroup() == asAbstract(this)); + SWIFT_TASK_DEBUG_LOG("offer task %p to group %p", completedTask, this); mutex.lock(); // TODO: remove fragment lock, and use status for synchronization @@ -605,7 +572,6 @@ void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // W:n R:0 P:1 -> W:y R:1 P:1 // complete immediately // W:n R:0 P:1 -> W:y R:1 P:3 // complete immediately, 2 more pending tasks auto assumed = statusAddReadyAssumeAcquire(); - SWIFT_TASK_DEBUG_LOG("offer task %p to group %p, tasks pending = %d", completedTask, assumed.pendingTasks()); auto asyncContextPrefix = reinterpret_cast( reinterpret_cast(context) - sizeof(FutureAsyncContextPrefix)); @@ -641,13 +607,7 @@ void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { static_cast( waitingTask->ResumeContext); - if (this->eagerlyReleaseCompleteTasks) { - fprintf(stderr, "[%s:%d](%s) offer: eagerlyReleaseCompleteTasks\n", __FILE_NAME__, __LINE__, __FUNCTION__); - fillGroupNextResult(waitingContext, result); - } else { - fprintf(stderr, "[%s:%d](%s) offer: NOT\n", __FILE_NAME__, __LINE__, __FUNCTION__); - fillGroupNextResult(waitingContext, result); - } + fillGroupNextResult(waitingContext, result); detachChild(result.retainedTask); _swift_tsan_acquire(static_cast(waitingTask)); @@ -667,31 +627,20 @@ void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // queue when a task polls during next() it will notice that we have a value // ready for it, and will process it immediately without suspending. assert(!waitQueue.load(std::memory_order_relaxed)); - if (!this->eagerlyReleaseCompleteTasks) { - SWIFT_TASK_DEBUG_LOG("group has no waiting tasks, RETAIN and store ready task = %p", - completedTask); - // Retain the task while it is in the queue; - // it must remain alive until the task group is alive. - swift_retain(completedTask); - - auto readyItem = ReadyQueueItem::get( - hadErrorResult ? ReadyStatus::Error : ReadyStatus::Success, - completedTask - ); - - assert(completedTask == readyItem.getTask()); - assert(readyItem.getTask()->isFuture()); - readyQueue.enqueue(readyItem); - } else { - assert(this->eagerlyReleaseCompleteTasks); - // DO NOT retain the task; and do not store the value in the readyQueue at all (!) - // - // In the "eagerlyRelease" completed tasks mode, we are guaranteed that tasks are of Void type, - // and thus there is no necessity to store values, because we can always "make them up" when polled. - // From the user's perspective, it is indistinguishable if they received the "real value" or one we "made up", - // because Void is always the same, and cannot be examined in any way to determine if it was the "actual" Void or not. - } + SWIFT_TASK_DEBUG_LOG("group has no waiting tasks, RETAIN and store ready task = %p", + completedTask); + // Retain the task while it is in the queue; + // it must remain alive until the task group is alive. + swift_retain(completedTask); + + auto readyItem = ReadyQueueItem::get( + hadErrorResult ? 
ReadyStatus::Error : ReadyStatus::Success, + completedTask + ); + assert(completedTask == readyItem.getTask()); + assert(readyItem.getTask()->isFuture()); + readyQueue.enqueue(readyItem); mutex.unlock(); // TODO: remove fragment lock, and use status for synchronization return; } @@ -749,7 +698,7 @@ static void swift_taskGroup_wait_next_throwingImpl( PollResult polled = group->poll(waitingTask); switch (polled.status) { case PollStatus::MustWait: - SWIFT_TASK_DEBUG_LOG("poll group = %p, tasks ready = 0, waiting task = %p", + SWIFT_TASK_DEBUG_LOG("poll group = %p, no ready tasks, waiting task = %p", group, waitingTask); // The waiting task has been queued on the channel, // there were pending tasks so it will be woken up eventually. @@ -765,17 +714,13 @@ static void swift_taskGroup_wait_next_throwingImpl( case PollStatus::Success: SWIFT_TASK_DEBUG_LOG("poll group = %p, task = %p, ready task available = %p", group, waitingTask, polled.retainedTask); - if (this->eagerlyReleaseCompleteTasks) { - fillGroupNextVoidResult(context, polled); - } else { - fillGroupNextResult(context, polled); - } - + fillGroupNextResult(context, polled); if (auto completedTask = polled.retainedTask) { // it would be null for PollStatus::Empty, then we don't need to release - group->detachChild(completedTask); - swift_release(completedTask); + group->detachChild(polled.retainedTask); + swift_release(polled.retainedTask); } + return waitingTask->runInFullyEstablishedContext(); } } @@ -810,8 +755,8 @@ PollResult TaskGroupImpl::poll(AsyncTask *waitingTask) { // ==== 2) Ready task was polled, return with it immediately ----------------- if (assumed.readyTasks()) { - SWIFT_TASK_DEBUG_LOG("poll group = %p, tasks ready=%d, pending=%d", - this, assumed.readyTasks(), assumed.pendingTasks()); + SWIFT_TASK_DEBUG_LOG("poll group = %p, group has ready tasks = %d", + this, assumed.readyTasks()); auto assumedStatus = assumed.status; auto newStatus = TaskGroupImpl::GroupStatus{assumedStatus}; diff --git a/stdlib/public/BackDeployConcurrency/TaskGroup.swift b/stdlib/public/BackDeployConcurrency/TaskGroup.swift index 55e9fca22f6a9..6955c5ee52552 100644 --- a/stdlib/public/BackDeployConcurrency/TaskGroup.swift +++ b/stdlib/public/BackDeployConcurrency/TaskGroup.swift @@ -464,7 +464,6 @@ public struct ThrowingTaskGroup { } } - // TODO(ktoso): doesn't seem to be used? @usableFromInline internal mutating func _waitForAll() async throws { while let _ = try await next() { } diff --git a/stdlib/public/Concurrency/TaskGroup.cpp b/stdlib/public/Concurrency/TaskGroup.cpp index ac1c3c1d4f214..e9cd764acbdd6 100644 --- a/stdlib/public/Concurrency/TaskGroup.cpp +++ b/stdlib/public/Concurrency/TaskGroup.cpp @@ -137,15 +137,6 @@ class TaskGroupImpl: public TaskGroupTaskStatusRecord { /*task*/ asyncTask }; } - - static PollResult getVoid() { - return PollResult{ - /*status*/ PollStatus::Empty, - /*storage*/ nullptr, - /*successType*/nullptr, // TODO: Void.self - /*task*/ nullptr - }; - } }; /// An item within the message queue of a group. 
@@ -332,14 +323,11 @@ class TaskGroupImpl: public TaskGroupTaskStatusRecord { friend class ::swift::AsyncTask; public: - const bool eagerlyReleaseCompleteTasks; - explicit TaskGroupImpl(const Metadata *T, bool eagerlyReleaseCompleteTasks) + explicit TaskGroupImpl(const Metadata *T) : TaskGroupTaskStatusRecord(), status(GroupStatus::initial().status), readyQueue(), - waitQueue(nullptr), - successType(T), - eagerlyReleaseCompleteTasks(eagerlyReleaseCompleteTasks) {} + waitQueue(nullptr), successType(T) {} TaskGroupTaskStatusRecord *getTaskRecord() { return reinterpret_cast(this); @@ -501,7 +489,7 @@ SWIFT_CC(swift) static void swift_taskGroup_initializeImpl(TaskGroup *group, const Metadata *T) { SWIFT_TASK_DEBUG_LOG("creating task group = %p", group); - TaskGroupImpl *impl = ::new (group) TaskGroupImpl(T, /*eagerlyReleaseCompleteTasks=*/true); + TaskGroupImpl *impl = ::new (group) TaskGroupImpl(T); auto record = impl->getTaskRecord(); assert(impl == record && "the group IS the task record"); @@ -557,12 +545,6 @@ static void swift_taskGroup_destroyImpl(TaskGroup *group) { void TaskGroupImpl::destroy() { SWIFT_TASK_DEBUG_LOG("destroying task group = %p", this); - if (!this->isEmpty()) { - auto status = this->statusLoadRelaxed(); - SWIFT_TASK_DEBUG_LOG("destroying task group = %p, tasks .ready = %d, .pending = %d", - this, status.readyTasks(), status.pendingTasks()); - } - assert(this->isEmpty() && "Attempted to destroy non-empty task group!"); // First, remove the group from the task and deallocate the record removeStatusRecord(getTaskRecord()); @@ -603,29 +585,6 @@ static void fillGroupNextResult(TaskFutureWaitAsyncContext *context, // Initialize the result as an Optional. const Metadata *successType = result.successType; OpaqueValue *destPtr = context->successResultPointer; - successType->vw_storeEnumTagSinglePayload(destPtr, 1, 1); - return; - } - } -} - -static void fillGroupNextVoidResult(TaskFutureWaitAsyncContext *context, - PollResult result) { - /// Fill in the result value - switch (result.status) { - case PollStatus::MustWait: - assert(false && "filling a waiting status?"); - return; - - case PollStatus::Error: { - assert(false && "cannot have errors"); - return; - } - - case PollStatus::Success: { - // Initialize the result as an Optional. - const Metadata *successType = result.successType; - OpaqueValue *destPtr = context->successResultPointer; // TODO: figure out a way to try to optimistically take the // value out of the finished task's future, if there are no // remaining references to it. @@ -646,16 +605,6 @@ static void fillGroupNextVoidResult(TaskFutureWaitAsyncContext *context, // TaskGroup is locked upon entry and exit void TaskGroupImpl::enqueueCompletedTask(AsyncTask *completedTask, bool hadErrorResult) { - if (this->eagerlyReleaseCompleteTasks) { - SWIFT_TASK_DEBUG_LOG("group has no waiting tasks, eager release mode; release result task = %p", - completedTask); - // DO NOT RETAIN THE TASK. - // We know it is Void, so we don't need to store the result; - // By releasing tasks eagerly we're able to keep "infinite" task groups, - // running, that never consume their values. Even more-so, - return; - } - // Retain the task while it is in the queue; it must remain alive until // it is found by poll. This retain will balanced by the release in poll. 
swift_retain(completedTask); @@ -697,9 +646,6 @@ void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // W:n R:0 P:1 -> W:y R:1 P:3 // complete immediately, 2 more pending tasks auto assumed = statusAddReadyAssumeAcquire(); - SWIFT_TASK_DEBUG_LOG("group %p, ready: %d, pending: %d", - this, assumed.readyTasks(), assumed.pendingTasks()); - auto asyncContextPrefix = reinterpret_cast( reinterpret_cast(context) - sizeof(FutureAsyncContextPrefix)); bool hadErrorResult = false; @@ -786,6 +732,8 @@ void TaskGroupImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // ready for it, and will process it immediately without suspending. assert(!waitQueue.load(std::memory_order_relaxed)); + SWIFT_TASK_DEBUG_LOG("group has no waiting tasks, RETAIN and store ready task = %p", + completedTask); enqueueCompletedTask(completedTask, hadErrorResult); unlock(); // TODO: remove fragment lock, and use status for synchronization } @@ -862,11 +810,7 @@ static void swift_taskGroup_wait_next_throwingImpl( case PollStatus::Success: SWIFT_TASK_DEBUG_LOG("poll group = %p, task = %p, ready task available = %p", group, waitingTask, polled.retainedTask); - if (group->eagerlyReleaseCompleteTasks) { - fillGroupNextVoidResult(context, polled); - } else { - fillGroupNextResult(context, polled); - } + fillGroupNextResult(context, polled); if (auto completedTask = polled.retainedTask) { // Remove the child from the task group's running tasks list. _swift_taskGroup_detachChild(asAbstract(group), completedTask); @@ -915,8 +859,8 @@ reevaluate_if_taskgroup_has_results:; // ==== 2) Ready task was polled, return with it immediately ----------------- if (assumed.readyTasks()) { - SWIFT_TASK_DEBUG_LOG("poll group = %p, tasks .ready = %d, .pending = %d", - this, assumed.readyTasks(), assumed.pendingTasks()); + SWIFT_TASK_DEBUG_LOG("poll group = %p, group has ready tasks = %d", + this, assumed.readyTasks()); auto assumedStatus = assumed.status; auto newStatus = TaskGroupImpl::GroupStatus{assumedStatus}; @@ -933,17 +877,6 @@ reevaluate_if_taskgroup_has_results:; // Success! We are allowed to poll. ReadyQueueItem item; - if (this->eagerlyReleaseCompleteTasks) { - SWIFT_TASK_DEBUG_LOG("poll group = %p; polled in eager-release mode; make up Void value to yield", - this, assumed.readyTasks(), assumed.pendingTasks()); - result.status = PollStatus::Success; - result.storage = nullptr; - result.retainedTask = nullptr; - result.successType = this->successType; - unlock(); // TODO: remove fragment lock, and use status for synchronization - return result; - } - bool taskDequeued = readyQueue.dequeue(item); assert(taskDequeued); (void) taskDequeued; @@ -1102,14 +1035,10 @@ void swift::_swift_taskGroup_cancelAllChildren(TaskGroup *group) { // ============================================================================= // ==== addPending ------------------------------------------------------------- - SWIFT_CC(swift) static bool swift_taskGroup_addPendingImpl(TaskGroup *group, bool unconditionally) { - auto assumed = asImpl(group)->statusAddPendingTaskRelaxed(unconditionally); - SWIFT_TASK_DEBUG_LOG("add pending %s to group %p, tasks pending = %d", - unconditionally ? 
"unconditionally" : "", - group, assumed.pendingTasks()); - return !assumed.isCancelled(); + auto assumedStatus = asImpl(group)->statusAddPendingTaskRelaxed(unconditionally); + return !assumedStatus.isCancelled(); } #define OVERRIDE_TASK_GROUP COMPATIBILITY_OVERRIDE diff --git a/stdlib/public/Concurrency/TaskGroup.swift b/stdlib/public/Concurrency/TaskGroup.swift index b8468e070c7c2..ae2a961922a69 100644 --- a/stdlib/public/Concurrency/TaskGroup.swift +++ b/stdlib/public/Concurrency/TaskGroup.swift @@ -10,7 +10,6 @@ // //===----------------------------------------------------------------------===// - import Swift @_implementationOnly import _SwiftConcurrencyShims import Darwin @@ -92,32 +91,6 @@ public func withTaskGroup( #endif } -@available(SwiftStdlib 5.1, *) -@_unsafeInheritExecutor -@inlinable -public func withTaskGroupSuper( - of childTaskResultType: Void.Type = Void.self, - returning returnType: GroupResult.Type = GroupResult.self, - body: (inout TaskGroup) async -> GroupResult -) async -> GroupResult { - #if compiler(>=5.5) && $BuiltinTaskGroupWithArgument - - let _group = Builtin.createTaskGroup(Void.self) - var group = TaskGroup(group: _group) - - // Run the withTaskGroup body. - let result = await body(&group) - - await group.awaitAllRemainingTasks() - - Builtin.destroyTaskGroup(_group) - return result - - #else - fatalError("Swift compiler is incompatible with this SDK version") - #endif -} - /// Starts a new scope that can contain a dynamic number of throwing child tasks. /// /// A group waits for all of its child tasks @@ -584,7 +557,7 @@ public struct ThrowingTaskGroup { @usableFromInline internal mutating func _waitForAll() async throws { - while let _ = try? await next() { } + while let _ = try await next() { } } /// Wait for all of the group's remaining tasks to complete. diff --git a/test/Concurrency/Runtime/async_taskgroup_void_neverConsumingTasks.swift b/test/Concurrency/Runtime/async_taskgroup_void_neverConsumingTasks.swift deleted file mode 100644 index 987de6486c9fe..0000000000000 --- a/test/Concurrency/Runtime/async_taskgroup_void_neverConsumingTasks.swift +++ /dev/null @@ -1,76 +0,0 @@ -// RUN: %target-run-simple-swift( -Xfrontend -disable-availability-checking -parse-as-library) | %FileCheck %s --dump-input=always -// REQUIRES: executable_test -// REQUIRES: concurrency -// REQUIRES: concurrency_runtime -// UNSUPPORTED: back_deployment_runtime -// UNSUPPORTED: OS=linux-gnu -import Darwin - -actor Waiter { - let until: Int - var count: Int - - var cc: CheckedContinuation? - - init(until: Int) { - self.until = until - self.count = 0 - } - - func increment() { - self.count += 1 - fputs("> increment (\(self.count)/\(self.until))\n", stderr); - if self.until <= self.count { - if let cc = self.cc { - cc.resume(returning: self.count) - } - } - } - - func wait() async -> Int { - if self.until <= self.count { - fputs("> RETURN in Waiter\n", stderr); - return self.count - } - - return await withCheckedContinuation { cc in - fputs("> WAIT in Waiter\n", stderr); - self.cc = cc - } - } -} - -@available(SwiftStdlib 5.1, *) -func test_taskGroup_void_neverConsume() async { - let until = 100_000_000 - let waiter = Waiter(until: until) - - let allTasks = await withTaskGroupSuper(of: Void.self, returning: Int.self) { group in - for n in 1...until { - fputs("> enqueue: \(n)\n", stderr); - group.addTask { - fputs("> run: \(n)\n", stderr); - try? 
await Task.sleep(until: .now + .milliseconds(100), clock: .continuous) - await waiter.increment() - } - } - - let void = await next() - - // wait a little bit, so some tasks complete before we hit the implicit "wait at end of task group scope" - try? await Task.sleep(until: .now + .milliseconds(500), clock: .continuous) - - return until - } - - // CHECK: all tasks: 100 - print("all tasks: \(allTasks)") - print("actor: \(allTasks)") -} - -@available(SwiftStdlib 5.1, *) -@main struct Main { - static func main() async { - await test_taskGroup_void_neverConsume() - } -} From 757b586f50bcd4b30c91315129693db5baa8c4ba Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Fri, 25 Nov 2022 18:40:30 +0900 Subject: [PATCH 3/8] [Concurrency] TaskPool implementation A TaskPool is similar to a TaskGroup, but behaves very differently. Tasks added to the pool execute the same way as tasks added to a group; however, they cannot be awaited on individually. The implementation does not track ready tasks at all, and therefore cannot track the exact number of times a next() would have to be resumed. Instead, it automatically releases completed child tasks and subtracts their pending counts from the pool status. This allows us to implement a waitAll(), but it is not possible to wait for individual completions (one could "wait for the next completion", but that is not very useful, as we would not know _which_ task completed). A TaskPool is useful in the "runs forever" core loops of RPC and HTTP servers, where the top-level task of the server runs a TaskPool and dispatches new child tasks to handle incoming requests, many of which may run concurrently. We want to use child tasks to keep this efficient, but a TaskGroup cannot be used here: its completed tasks must be collected as soon as they finish, and the single task that runs the group cannot do that while it is busy awaiting the sequence of incoming requests.
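To illustrate the intended usage pattern, here is a rough sketch only; this patch does not pin down the Swift surface in this message, so `withTaskPool(of:)` and `pool.addTask` below are assumed to mirror the existing `withTaskGroup(of:)` / `group.addTask` API, and `handle(_:)` is a hypothetical request handler:

    // Sketch of the "runs forever" server loop described above.
    // Assumed API: withTaskPool(of:) and addTask, mirroring TaskGroup.
    func handle(_ request: Int) async {          // hypothetical handler
      try? await Task.sleep(nanoseconds: 10_000_000)
      print("handled request \(request)")
    }

    func serve(requests: AsyncStream<Int>) async {
      await withTaskPool(of: Void.self) { pool in
        for await request in requests {
          pool.addTask {
            // Child task; it is never awaited individually, and the pool
            // releases it as soon as it completes, so this loop can keep
            // adding work indefinitely.
            await handle(request)
          }
        }
      } // leaving the scope performs the implicit waitAll()
    }

Because the pool never accumulates ready results, the loop above needs no consumer calling next(), which is what makes the group-based equivalent impractical.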
--- include/swift/ABI/MetadataValues.h | 42 +- include/swift/ABI/Task.h | 58 +- include/swift/ABI/TaskLocal.h | 1 + include/swift/ABI/TaskOptions.h | 17 + include/swift/ABI/TaskPool.h | 61 ++ include/swift/ABI/TaskStatus.h | 89 ++ include/swift/AST/Builtins.def | 19 + include/swift/Basic/Features.def | 3 +- include/swift/Runtime/Concurrency.h | 114 ++- include/swift/Runtime/RuntimeFunctions.def | 18 + lib/AST/ASTPrinter.cpp | 4 + lib/AST/Builtins.cpp | 35 +- lib/IRGen/Callee.h | 3 + lib/IRGen/GenBuiltin.cpp | 22 +- lib/IRGen/GenCall.cpp | 27 + lib/IRGen/GenCall.h | 1 + lib/IRGen/GenConcurrency.cpp | 30 + lib/IRGen/GenConcurrency.h | 6 + lib/IRGen/IRGenModule.cpp | 6 + lib/IRGen/IRGenModule.h | 2 + lib/IRGen/IRGenSIL.cpp | 3 + lib/SIL/IR/OperandOwnership.cpp | 19 +- lib/SIL/IR/ValueOwnership.cpp | 3 + lib/SIL/Utils/MemAccessUtils.cpp | 3 + lib/SILGen/SILGenBuiltin.cpp | 39 + .../AccessEnforcementReleaseSinking.cpp | 3 + .../BackDeployConcurrency/CMakeLists.txt | 1 + .../CompatibilityOverrideConcurrency.def | 49 + .../ConcurrencyRuntime.h | 112 +++ stdlib/public/BackDeployConcurrency/Task.cpp | 17 +- stdlib/public/BackDeployConcurrency/Task.h | 1 + .../public/BackDeployConcurrency/TaskLocal.h | 1 + .../public/BackDeployConcurrency/TaskPool.cpp | 934 ++++++++++++++++++ .../public/BackDeployConcurrency/TaskPool.h | 61 ++ .../BackDeployConcurrency/TaskPrivate.h | 3 +- .../BackDeployConcurrency/TaskStatus.cpp | 33 + .../CompatibilityOverrideConcurrency.def | 44 + stdlib/public/Concurrency/AsyncLet.cpp | 2 +- stdlib/public/Concurrency/CMakeLists.txt | 2 + stdlib/public/Concurrency/Task.cpp | 83 +- stdlib/public/Concurrency/TaskGroup.cpp | 5 +- stdlib/public/Concurrency/TaskGroup.swift | 1 - stdlib/public/Concurrency/TaskPool.cpp | 910 +++++++++++++++++ stdlib/public/Concurrency/TaskPool.swift | 354 +++++++ stdlib/public/Concurrency/TaskPrivate.h | 23 +- stdlib/public/Concurrency/TaskStatus.cpp | 66 +- stdlib/public/Concurrency/Tracing.h | 8 +- stdlib/public/Concurrency/TracingSignpost.h | 14 +- stdlib/public/Concurrency/TracingStubs.h | 6 +- .../CompatibilityOverrideConcurrency.def | 45 + .../include/Concurrency/Task.h | 42 + .../include/Concurrency/TaskLocal.h | 1 + .../async_taskpool_neverConsumingTasks.swift | 75 ++ test/SILGen/async_builtins.swift | 8 + .../Operations/DumpConcurrency.swift | 1 + .../CompatibilityOverrideConcurrency.cpp | 2 + 56 files changed, 3462 insertions(+), 70 deletions(-) create mode 100644 include/swift/ABI/TaskPool.h create mode 100644 stdlib/public/BackDeployConcurrency/TaskPool.cpp create mode 100644 stdlib/public/BackDeployConcurrency/TaskPool.h create mode 100644 stdlib/public/Concurrency/TaskPool.cpp create mode 100644 stdlib/public/Concurrency/TaskPool.swift create mode 100644 test/Concurrency/Runtime/async_taskpool_neverConsumingTasks.swift diff --git a/include/swift/ABI/MetadataValues.h b/include/swift/ABI/MetadataValues.h index 87f3235f41726..6ec000c4b2c0f 100644 --- a/include/swift/ABI/MetadataValues.h +++ b/include/swift/ABI/MetadataValues.h @@ -55,6 +55,9 @@ enum { /// The number of words in a task group. NumWords_TaskGroup = 32, + /// The number of words in a task pool. + NumWords_TaskPool = 32, + /// The number of words in an AsyncLet (flags + child task context & allocation) NumWords_AsyncLet = 80, // 640 bytes ought to be enough for anyone @@ -145,6 +148,9 @@ const size_t Alignment_TaskGroup = MaximumAlignment; /// The alignment of an AsyncLet. const size_t Alignment_AsyncLet = MaximumAlignment; +/// The alignment of a TaskPool. 
+const size_t Alignment_TaskPool = MaximumAlignment; + /// Flags stored in the value-witness table. template class TargetValueWitnessFlags { @@ -2249,17 +2255,17 @@ class TaskCreateFlags : public FlagSet { public: enum { // Priority that user specified while creating the task - RequestedPriority = 0, - RequestedPriority_width = 8, - - Task_IsChildTask = 8, - // Should only be set in task-to-thread model where Task.runInline is - // available - Task_IsInlineTask = 9, - Task_CopyTaskLocals = 10, - Task_InheritContext = 11, - Task_EnqueueJob = 12, - Task_AddPendingGroupTaskUnconditionally = 13, + RequestedPriority = 0, + RequestedPriority_width = 8, + + Task_IsChildTask = 8, + // Should only be set in task-to-thread model + // where Task.runInline is available + Task_IsInlineTask = 9, + Task_CopyTaskLocals = 10, + Task_InheritContext = 11, + Task_EnqueueJob = 12, + Task_AddPendingGroupTaskUnconditionally = 13, }; explicit constexpr TaskCreateFlags(size_t bits) : FlagSet(bits) {} @@ -2286,6 +2292,10 @@ class TaskCreateFlags : public FlagSet { FLAGSET_DEFINE_FLAG_ACCESSORS(Task_AddPendingGroupTaskUnconditionally, addPendingGroupTaskUnconditionally, setAddPendingGroupTaskUnconditionally) + // re-use the group flag for adding to a pool + FLAGSET_DEFINE_FLAG_ACCESSORS(Task_AddPendingGroupTaskUnconditionally, + addPendingPoolTaskUnconditionally, + setAddPendingPoolTaskUnconditionally) }; /// Flags for schedulable jobs. @@ -2305,7 +2315,7 @@ class JobFlags : public FlagSet { Task_IsChildTask = 24, Task_IsFuture = 25, Task_IsGroupChildTask = 26, - // 27 is currently unused + Task_IsPoolChildTask = 27, Task_IsAsyncLetTask = 28, }; @@ -2339,6 +2349,9 @@ class JobFlags : public FlagSet { FLAGSET_DEFINE_FLAG_ACCESSORS(Task_IsAsyncLetTask, task_isAsyncLetTask, task_setIsAsyncLetTask) + FLAGSET_DEFINE_FLAG_ACCESSORS(Task_IsPoolChildTask, + task_isPoolChildTask, + task_setIsPoolChildTask) }; /// Kinds of task status record. @@ -2363,6 +2376,9 @@ enum class TaskStatusRecordKind : uint8_t { /// escalated. EscalationNotification = 4, + /// TaskPool + TaskPool = 5, + // Kinds >= 192 are private to the implementation. First_Reserved = 192, Private_RecordLock = 192 @@ -2379,6 +2395,8 @@ enum class TaskOptionRecordKind : uint8_t { AsyncLet = 2, /// Request a child task for an 'async let'. AsyncLetWithBuffer = 3, + /// Request a child task to be part of a specific task pool. + TaskPool = 4, /// Request a child task for swift_task_run_inline. RunInline = UINT8_MAX, }; diff --git a/include/swift/ABI/Task.h b/include/swift/ABI/Task.h index 5d1c6c4422a1c..e90c50e9ef304 100644 --- a/include/swift/ABI/Task.h +++ b/include/swift/ABI/Task.h @@ -39,6 +39,7 @@ struct SwiftError; class TaskStatusRecord; class TaskOptionRecord; class TaskGroup; +class TaskPool; extern FullMetadata jobHeapMetadata; @@ -183,11 +184,11 @@ class NullaryContinuationJob : public Job { /// ### Fragments /// An AsyncTask may have the following fragments: /// -/// +--------------------------+ -/// | childFragment? | -/// | groupChildFragment? | -/// | futureFragment? |* -/// +--------------------------+ +/// +------------------------------------------------+ +/// | childFragment? | +/// | (groupChildFragment | poolChildFragment)? | +/// | futureFragment? |* +/// +------------------------------------------------+ /// /// * The future fragment is dynamic in size, based on the future result type /// it can hold, and thus must be the *last* fragment. 
@@ -425,6 +426,7 @@ class AsyncTask : public Job { GroupChildFragment *groupChildFragment() { assert(hasGroupChildFragment()); + assert(!hasPoolChildFragment()); // pool and group are mutually exclusive auto offset = reinterpret_cast(this); offset += sizeof(AsyncTask); @@ -434,6 +436,45 @@ class AsyncTask : public Job { return reinterpret_cast(offset); } + // ==== TaskPool Child ------------------------------------------------------ + + /// A child task created by `pool.addTask` is called a "task pool child." + /// Upon completion, in addition to the usual future notifying all its waiters, + /// it must also `pool->offer` itself to the pool. + class PoolChildFragment { + private: + TaskPool* Pool; + + friend class AsyncTask; + friend class TaskPool; + + public: + explicit PoolChildFragment(TaskPool *pool) + : Pool(pool) {} + + /// Return the group this task should offer into when it completes. + TaskPool* getPool() { + return Pool; + } + }; + + // Checks if task is a child of a TaskPool task. + // + // A child task that is a group child knows that it's parent is a group + // and therefore may `groupOffer` to it upon completion. + bool hasPoolChildFragment() const { return Flags.task_isPoolChildTask(); } + + PoolChildFragment *poolChildFragment() { + assert(hasPoolChildFragment()); + + auto offset = reinterpret_cast(this); + offset += sizeof(AsyncTask); + if (hasChildFragment()) + offset += sizeof(ChildFragment); + + return reinterpret_cast(offset); + } + // ==== Future --------------------------------------------------------------- class FutureFragment { @@ -549,12 +590,15 @@ class AsyncTask : public Job { FutureFragment *futureFragment() { assert(isFuture()); - auto offset = reinterpret_cast(this); + auto offset = reinterpret_cast(this); offset += sizeof(AsyncTask); if (hasChildFragment()) offset += sizeof(ChildFragment); - if (hasGroupChildFragment()) + if (hasGroupChildFragment()) { offset += sizeof(GroupChildFragment); + } else if (hasPoolChildFragment()) { + offset += sizeof(PoolChildFragment); + } return reinterpret_cast(offset); } diff --git a/include/swift/ABI/TaskLocal.h b/include/swift/ABI/TaskLocal.h index cbe17932d6236..87cabf8577453 100644 --- a/include/swift/ABI/TaskLocal.h +++ b/include/swift/ABI/TaskLocal.h @@ -27,6 +27,7 @@ struct OpaqueValue; struct SwiftError; class TaskStatusRecord; class TaskGroup; +class TaskPool; // ==== Task Locals Values --------------------------------------------------- diff --git a/include/swift/ABI/TaskOptions.h b/include/swift/ABI/TaskOptions.h index eb381b843bc61..eff8bac6f6be8 100644 --- a/include/swift/ABI/TaskOptions.h +++ b/include/swift/ABI/TaskOptions.h @@ -75,6 +75,23 @@ class TaskGroupTaskOptionRecord : public TaskOptionRecord { } }; +class TaskPoolTaskOptionRecord : public TaskOptionRecord { + TaskPool * const Pool; + + public: + TaskPoolTaskOptionRecord(TaskPool *pool) + : TaskOptionRecord(TaskOptionRecordKind::TaskPool), + Pool(pool) {} + + TaskPool *getPool() const { + return Pool; + } + + static bool classof(const TaskOptionRecord *record) { + return record->getKind() == TaskOptionRecordKind::TaskPool; + } +}; + /// Task option to specify on what executor the task should be executed. 
/// diff --git a/include/swift/ABI/TaskPool.h b/include/swift/ABI/TaskPool.h new file mode 100644 index 0000000000000..d149c5eecc1fc --- /dev/null +++ b/include/swift/ABI/TaskPool.h @@ -0,0 +1,61 @@ +//===--- TaskPool.h - ABI structures for task pools -00--------*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2022 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Swift ABI describing task pools. +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_ABI_TASK_POOL_H +#define SWIFT_ABI_TASK_POOL_H + +#include "swift/ABI/Task.h" +#include "swift/ABI/TaskStatus.h" +#include "swift/ABI/HeapObject.h" +#include "swift/Runtime/Concurrency.h" +#include "swift/Runtime/Config.h" +#include "swift/Basic/RelativePointer.h" +#include "swift/Basic/STLExtras.h" + +namespace swift { + +/// The task group is responsible for maintaining dynamically created child tasks. +class alignas(Alignment_TaskPool) TaskPool { +public: + // These constructors do not initialize the group instance, and the + // destructor does not destroy the group instance; you must call + // swift_taskGroup_{initialize,destroy} yourself. + constexpr TaskPool() + : PrivateData{} {} + + void *PrivateData[NumWords_TaskPool]; + + /// Upon a future task's completion, offer it to the task group it belongs to. + void offer(AsyncTask *completed, AsyncContext *context); + + /// Checks the cancellation status of the group. + bool isCancelled(); + + // Add a child task to the task group. Always called while holding the + // status record lock of the task group's owning task. + void addChildTask(AsyncTask *task); + + // Remove a child task from the task group. Always called while holding + // the status record lock of the task group's owning task. + void removeChildTask(AsyncTask *task); + + // Provide accessor for task group's status record + TaskPoolTaskStatusRecord *getTaskRecord(); +}; + +} // end namespace swift + +#endif // SWIFT_ABI_TASK_POOL_H diff --git a/include/swift/ABI/TaskStatus.h b/include/swift/ABI/TaskStatus.h index bb7b5b5d97e2a..6f2a9e4ab0a66 100644 --- a/include/swift/ABI/TaskStatus.h +++ b/include/swift/ABI/TaskStatus.h @@ -260,6 +260,95 @@ class TaskGroupTaskStatusRecord : public TaskStatusRecord { } }; +class TaskPoolTaskStatusRecord : public TaskStatusRecord { + AsyncTask *FirstChild; + AsyncTask *LastChild; + +public: + TaskPoolTaskStatusRecord() + : TaskStatusRecord(TaskStatusRecordKind::TaskPool), + FirstChild(nullptr), + LastChild(nullptr) { + } + + TaskPoolTaskStatusRecord(AsyncTask *child) + : TaskStatusRecord(TaskStatusRecordKind::TaskPool), + FirstChild(child), + LastChild(child) { + assert(!LastChild || !LastChild->childFragment()->getNextChild()); + } + + TaskPool *getPool() { return reinterpret_cast(this); } + + /// Return the first child linked by this record. This may be null; + /// if not, it (and all of its successors) are guaranteed to satisfy + /// `isChildTask()`. + AsyncTask *getFirstChild() const { return FirstChild; } + + /// Attach the passed in `child` task to this group. 
+ void attachChild(AsyncTask *child) { + assert(child->hasPoolChildFragment()); + assert(child->poolChildFragment()->getPool() == getPool()); + + auto oldLastChild = LastChild; + LastChild = child; + + if (!FirstChild) { + // This is the first child we ever attach, so store it as FirstChild. + FirstChild = child; + return; + } + + oldLastChild->childFragment()->setNextChild(child); + } + + void detachChild(AsyncTask *child) { + assert(child && "cannot remove a null child from group"); + if (FirstChild == child) { + FirstChild = getNextChildTask(child); + if (FirstChild == nullptr) { + LastChild = nullptr; + } + return; + } + + AsyncTask *prev = FirstChild; + // Remove the child from the linked list, i.e.: + // prev -> afterPrev -> afterChild + // == + // child -> afterChild + // Becomes: + // prev --------------> afterChild + while (prev) { + auto afterPrev = getNextChildTask(prev); + + if (afterPrev == child) { + auto afterChild = getNextChildTask(child); + prev->childFragment()->setNextChild(afterChild); + if (child == LastChild) { + LastChild = prev; + } + return; + } + + prev = afterPrev; + } + } + + static AsyncTask *getNextChildTask(AsyncTask *task) { + return task->childFragment()->getNextChild(); + } + + using child_iterator = LinkedListIterator; + llvm::iterator_range children() const { + return child_iterator::rangeBeginning(getFirstChild()); + } + + static bool classof(const TaskStatusRecord *record) { + return record->getKind() == TaskStatusRecordKind::TaskGroup; + } +}; + /// A cancellation record which states that a task has an arbitrary /// function that needs to be called if the task is cancelled. /// diff --git a/include/swift/AST/Builtins.def b/include/swift/AST/Builtins.def index 8292f750bce47..67da39dd919f4 100644 --- a/include/swift/AST/Builtins.def +++ b/include/swift/AST/Builtins.def @@ -785,6 +785,14 @@ BUILTIN_MISC_OPERATION(CreateTaskGroup, BUILTIN_MISC_OPERATION(DestroyTaskGroup, "destroyTaskGroup", "", Special) +/// Create a task pool. +BUILTIN_MISC_OPERATION(CreateTaskPool, + "createTaskPool", "", Special) + +/// Destroy a task pool. +BUILTIN_MISC_OPERATION(DestroyTaskPool, + "destroyTaskPool", "", Special) + /// A builtin that can only be called from a transparent generic function. Takes /// two operands, the first operand the result address, the second operand the /// input address. Transforms into @@ -954,6 +962,17 @@ BUILTIN_MISC_OPERATION_WITH_SILGEN(CreateAsyncTask, BUILTIN_MISC_OPERATION_WITH_SILGEN(CreateAsyncTaskInGroup, "createAsyncTaskInGroup", "", Special) +/// createAsyncTaskInPool(): ( +/// Int, // flags +/// Builtin.RawPointer, // pool +/// @escaping () async throws -> T // function +/// ) -> Builtin.NativeObject +/// +/// Create a new asynchronous task future, given flags, a parent task, +/// task pool and a function to execute. +BUILTIN_MISC_OPERATION_WITH_SILGEN(CreateAsyncTaskInPool, + "createAsyncTaskInPool", "", Special) + /// globalStringTablePointer has type String -> Builtin.RawPointer. /// It returns an immortal, global string table pointer for strings constructed /// from string literals. 
We consider it effects as readnone meaning that it diff --git a/include/swift/Basic/Features.def b/include/swift/Basic/Features.def index 698b2da678798..f7cb4d871d766 100644 --- a/include/swift/Basic/Features.def +++ b/include/swift/Basic/Features.def @@ -79,7 +79,8 @@ LANGUAGE_FEATURE(InheritActorContext, 0, "@_inheritActorContext attribute", true LANGUAGE_FEATURE(ImplicitSelfCapture, 0, "@_implicitSelfCapture attribute", true) LANGUAGE_FEATURE(BuiltinBuildExecutor, 0, "Executor-building builtins", true) LANGUAGE_FEATURE(BuiltinBuildMainExecutor, 0, "MainActor executor building builtin", true) -LANGUAGE_FEATURE(BuiltinCreateAsyncTaskInGroup, 0, "MainActor executor building builtin", true) +LANGUAGE_FEATURE(BuiltinCreateAsyncTaskInGroup, 0, "Add a task into a TaskGroup", true) +LANGUAGE_FEATURE(BuiltinCreateAsyncTaskInPool, 0, "Add a task into a TaskPool", true) LANGUAGE_FEATURE(BuiltinCopy, 0, "Builtin.copy()", true) LANGUAGE_FEATURE(BuiltinStackAlloc, 0, "Builtin.stackAlloc", true) LANGUAGE_FEATURE(BuiltinTaskRunInline, 0, "Builtin.taskRunInline", true) diff --git a/include/swift/Runtime/Concurrency.h b/include/swift/Runtime/Concurrency.h index 141459ae62fe3..8d39a1ea9d4e6 100644 --- a/include/swift/Runtime/Concurrency.h +++ b/include/swift/Runtime/Concurrency.h @@ -19,8 +19,9 @@ #include "swift/ABI/AsyncLet.h" #include "swift/ABI/Task.h" -#include "swift/ABI/TaskGroup.h" #include "swift/ABI/TaskStatus.h" +#include "swift/ABI/TaskGroup.h" +#include "swift/ABI/TaskPool.h" #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wreturn-type-c-linkage" @@ -288,6 +289,117 @@ bool swift_taskGroup_isCancelled(TaskGroup *group); SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) bool swift_taskGroup_isEmpty(TaskGroup *group); +/// Wait for a readyQueue of a Channel to become non empty. +/// +/// This can be called from any thread. Its Swift signature is +/// +/// \code +/// func swift_taskPool_waitAll( +/// waitingTask: Builtin.NativeObject, // current task +/// pool: Builtin.RawPointer +/// ) async -> T +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) +SWIFT_CC(swiftasync) +void swift_taskPool_waitAll( + OpaqueValue *resultPointer, SWIFT_ASYNC_CONTEXT AsyncContext *callerContext, + TaskPool *pool, ThrowingTaskFutureWaitContinuationFunction *resumeFn, + AsyncContext *callContext); + +/// Initialize a `TaskGroup` in the passed `group` memory location. +/// The caller is responsible for retaining and managing the group's lifecycle. +/// +/// Its Swift signature is +/// +/// \code +/// func swift_taskPool_initialize(group: Builtin.RawPointer) +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_taskPool_initialize(TaskPool *pool, const Metadata *Void); + +/// Attach a child task to the parent task's task group record. +/// +/// This function MUST be called from the AsyncTask running the task group. +/// +/// Since the group (or rather, its record) is inserted in the parent task at +/// creation we do not need the parent task here, the group already is attached +/// to it. +/// Its Swift signature is +/// +/// \code +/// func swift_taskPool_attachChild( +/// group: Builtin.RawPointer, +/// child: Builtin.NativeObject +/// ) +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_taskPool_attachChild(TaskPool *pool, AsyncTask *child); + +/// Its Swift signature is +/// +/// This function MUST be called from the AsyncTask running the task group. 
+/// +/// \code +/// func swift_taskPool_destroy(_ group: Builtin.RawPointer) +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_taskPool_destroy(TaskPool *pool); + +/// Before starting a task group child task, inform the group that there is one +/// more 'pending' child to account for. +/// +/// This function SHOULD be called from the AsyncTask running the task group, +/// however is generally thread-safe as it only works with the group status. +/// +/// Its Swift signature is +/// +/// \code +/// func swift_taskPool_addPending( +/// group: Builtin.RawPointer, +/// unconditionally: Bool +/// ) -> Bool +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +bool swift_taskPool_addPending(TaskPool *pool, bool unconditionally); + +/// Cancel all tasks in the group. +/// This also prevents new tasks from being added. +/// +/// This can be called from any thread. +/// +/// Its Swift signature is +/// +/// \code +/// func swift_taskPool_cancelAll(group: Builtin.RawPointer) +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_taskPool_cancelAll(TaskPool *pool); + +/// Check ONLY if the group was explicitly cancelled, e.g. by `cancelAll`. +/// +/// This check DOES NOT take into account the task in which the group is running +/// being cancelled or not. +/// +/// This can be called from any thread. Its Swift signature is +/// +/// \code +/// func swift_taskPool_isCancelled(group: Builtin.RawPointer) +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +bool swift_taskPool_isCancelled(TaskPool *pool); + +/// Check the readyQueue of a task group, return true if it has no pending tasks. +/// +/// This can be called from any thread. Its Swift signature is +/// +/// \code +/// func swift_taskPool_isEmpty( +/// _ group: Builtin.RawPointer +/// ) -> Bool +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +bool swift_taskPool_isEmpty(TaskPool *pool); + /// DEPRECATED. swift_asyncLet_begin is used instead. 
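Taken together, these entry points describe a small accounting protocol: addPending is called before a child is started, every completion eventually drops the count again, cancelAll flips a flag that rejects further conditional adds, and waitAll may only return once the count is back to zero. Below is a toy, single-threaded model of that contract; the names and the synchronous structure are illustrative only, the real calls are the async runtime functions declared above.

#include <cassert>
#include <cstdio>

// Toy model of the task-pool bookkeeping contract; not the runtime API.
struct ToyPool {
  unsigned pending = 0;
  bool cancelled = false;

  // Mirrors swift_taskPool_addPending: refuse new (conditional) work
  // once the pool has been cancelled.
  bool addPending(bool unconditionally) {
    if (cancelled && !unconditionally)
      return false;
    ++pending;
    return true;
  }

  // Mirrors the completion side: every started child reports back
  // exactly once and drops the pending count.
  void offerCompleted() {
    assert(pending > 0 && "completion without a matching addPending");
    --pending;
  }

  void cancelAll() { cancelled = true; }
  bool isEmpty() const { return pending == 0; }
  bool isCancelled() const { return cancelled; }
};

int main() {
  ToyPool pool;
  pool.addPending(false);                  // child #1 admitted
  pool.addPending(false);                  // child #2 admitted

  pool.cancelAll();
  bool admitted = pool.addPending(false);  // rejected after cancel
  bool forced   = pool.addPending(true);   // unconditional add still admitted
  std::printf("conditional add after cancel: %d, unconditional: %d\n",
              admitted, forced);

  // Children complete in any order; "waitAll" may only return once the
  // count is back to zero.
  pool.offerCompleted();
  pool.offerCompleted();
  pool.offerCompleted();
  std::printf("empty: %d, cancelled: %d\n", pool.isEmpty(), pool.isCancelled());
  return 0;
}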
/// Its Swift signature is /// diff --git a/include/swift/Runtime/RuntimeFunctions.def b/include/swift/Runtime/RuntimeFunctions.def index 6703e24d08947..683c48369af27 100644 --- a/include/swift/Runtime/RuntimeFunctions.def +++ b/include/swift/Runtime/RuntimeFunctions.def @@ -2069,6 +2069,24 @@ FUNCTION(TaskGroupDestroy, ATTRS(NoUnwind), EFFECT(Concurrency)) +// void swift_taskPool_initialize(TaskPool *pool, const Metadata *Void); +FUNCTION(TaskPoolInitialize, + swift_taskPool_initialize, SwiftCC, + ConcurrencyAvailability, + RETURNS(VoidTy), + ARGS(Int8PtrTy, TypeMetadataPtrTy), + ATTRS(NoUnwind), + EFFECT(Concurrency)) + +// void swift_taskPool_destroy(TaskPool *pool); +FUNCTION(TaskPoolDestroy, + swift_taskPool_destroy, SwiftCC, + ConcurrencyAvailability, + RETURNS(VoidTy), + ARGS(Int8PtrTy), + ATTRS(NoUnwind), + EFFECT(Concurrency)) + // AutoDiffLinearMapContext *swift_autoDiffCreateLinearMapContext(size_t); FUNCTION(AutoDiffCreateLinearMapContext, swift_autoDiffCreateLinearMapContext, SwiftCC, diff --git a/lib/AST/ASTPrinter.cpp b/lib/AST/ASTPrinter.cpp index d52bebb41729e..54faec154b6dc 100644 --- a/lib/AST/ASTPrinter.cpp +++ b/lib/AST/ASTPrinter.cpp @@ -2910,6 +2910,10 @@ static bool usesFeatureBuiltinCreateAsyncTaskInGroup(Decl *decl) { return false; } +static bool usesFeatureBuiltinCreateAsyncTaskInPool(Decl *decl) { + return false; +} + static bool usesFeatureBuiltinCopy(Decl *decl) { return false; } static bool usesFeatureBuiltinTaskRunInline(Decl *) { return false; } diff --git a/lib/AST/Builtins.cpp b/lib/AST/Builtins.cpp index f26552f12d5ac..8b0b2dd0d2c93 100644 --- a/lib/AST/Builtins.cpp +++ b/lib/AST/Builtins.cpp @@ -1455,6 +1455,19 @@ static ValueDecl *getCreateAsyncTaskInGroup(ASTContext &ctx, Identifier id) { return builder.build(id); } +static ValueDecl *getCreateAsyncTaskInPool(ASTContext &ctx, Identifier id) { + BuiltinFunctionBuilder builder(ctx); + auto genericParam = makeGenericParam().build(builder); // + builder.addParameter(makeConcrete(ctx.getIntType())); // 0 flags + builder.addParameter(makeConcrete(ctx.TheRawPointerType)); // 1 pool + auto extInfo = ASTExtInfoBuilder().withAsync().withThrows().build(); + builder.addParameter( + makeConcrete(FunctionType::get({ }, genericParam, extInfo))); // 2 operation + builder.setResult(makeConcrete(getAsyncTaskAndContextType(ctx))); + + return builder.build(id); +} + static ValueDecl *getTaskRunInline(ASTContext &ctx, Identifier id) { return getBuiltinFunction( ctx, id, _thin, _generics(_unrestricted), @@ -1545,6 +1558,19 @@ static ValueDecl *getDestroyTaskGroup(ASTContext &ctx, Identifier id) { _void); } +static ValueDecl *getCreateTaskPool(ASTContext &ctx, Identifier id) { + return getBuiltinFunction(ctx, id, _thin, + _generics(_unrestricted), + _parameters(_metatype(_typeparam(0))), + _rawPointer); +} + +static ValueDecl *getDestroyTaskPool(ASTContext &ctx, Identifier id) { + return getBuiltinFunction(ctx, id, _thin, + _parameters(_rawPointer), + _void); +} + static ValueDecl *getBuildMainActorExecutorRef(ASTContext &ctx, Identifier id) { return getBuiltinFunction(ctx, id, _thin, _parameters(), _executor); @@ -2808,6 +2834,9 @@ ValueDecl *swift::getBuiltinValueDecl(ASTContext &Context, Identifier Id) { case BuiltinValueKind::CreateAsyncTaskInGroup: return getCreateAsyncTaskInGroup(Context, Id); + case BuiltinValueKind::CreateAsyncTaskInPool: + return getCreateAsyncTaskInPool(Context, Id); + case BuiltinValueKind::TaskRunInline: return getTaskRunInline(Context, Id); @@ -2869,10 +2898,14 @@ ValueDecl 
*swift::getBuiltinValueDecl(ASTContext &Context, Identifier Id) { case BuiltinValueKind::CreateTaskGroup: return getCreateTaskGroup(Context, Id); - case BuiltinValueKind::DestroyTaskGroup: return getDestroyTaskGroup(Context, Id); + case BuiltinValueKind::CreateTaskPool: + return getCreateTaskPool(Context, Id); + case BuiltinValueKind::DestroyTaskPool: + return getDestroyTaskPool(Context, Id); + case BuiltinValueKind::ResumeNonThrowingContinuationReturning: case BuiltinValueKind::ResumeThrowingContinuationReturning: return getResumeContinuationReturning(Context, Id); diff --git a/lib/IRGen/Callee.h b/lib/IRGen/Callee.h index 36dbbb4d2e1fd..6027d64762d27 100644 --- a/lib/IRGen/Callee.h +++ b/lib/IRGen/Callee.h @@ -175,6 +175,7 @@ namespace irgen { AsyncLetGetThrowing, AsyncLetFinish, TaskGroupWaitNext, + TaskPoolWaitAll, DistributedExecuteTarget, }; @@ -247,6 +248,7 @@ namespace irgen { case SpecialKind::AsyncLetGetThrowing: case SpecialKind::AsyncLetFinish: case SpecialKind::TaskGroupWaitNext: + case SpecialKind::TaskPoolWaitAll: return true; case SpecialKind::DistributedExecuteTarget: return false; @@ -277,6 +279,7 @@ namespace irgen { case SpecialKind::AsyncLetGetThrowing: case SpecialKind::AsyncLetFinish: case SpecialKind::TaskGroupWaitNext: + case SpecialKind::TaskPoolWaitAll: return true; case SpecialKind::DistributedExecuteTarget: return false; diff --git a/lib/IRGen/GenBuiltin.cpp b/lib/IRGen/GenBuiltin.cpp index ac5ee640bd1b4..077b27386cafe 100644 --- a/lib/IRGen/GenBuiltin.cpp +++ b/lib/IRGen/GenBuiltin.cpp @@ -276,18 +276,30 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin, return; } + // Task Group if (Builtin.ID == BuiltinValueKind::CreateTaskGroup) { // Claim metadata pointer. (void)args.claimAll(); out.add(emitCreateTaskGroup(IGF, substitutions)); return; } - if (Builtin.ID == BuiltinValueKind::DestroyTaskGroup) { emitDestroyTaskGroup(IGF, args.claimNext()); return; } + // Task Pool + if (Builtin.ID == BuiltinValueKind::CreateTaskPool) { + // Claim metadata pointer. + (void) args.claimAll(); + out.add(emitCreateTaskPool(IGF, substitutions)); + return; + } + if (Builtin.ID == BuiltinValueKind::DestroyTaskPool) { + emitDestroyTaskPool(IGF, args.claimNext()); + return; + } + // Everything else cares about the (rvalue) argument. if (Builtin.ID == BuiltinValueKind::CancelAsyncTask) { @@ -296,13 +308,18 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin, } if (Builtin.ID == BuiltinValueKind::CreateAsyncTask || - Builtin.ID == BuiltinValueKind::CreateAsyncTaskInGroup) { + Builtin.ID == BuiltinValueKind::CreateAsyncTaskInGroup || + Builtin.ID == BuiltinValueKind::CreateAsyncTaskInPool) { auto flags = args.claimNext(); auto taskGroup = (Builtin.ID == BuiltinValueKind::CreateAsyncTaskInGroup) ? args.claimNext() : nullptr; + auto taskPool = + (Builtin.ID == BuiltinValueKind::CreateAsyncTaskInPool) + ? 
args.claimNext() + : nullptr; auto futureResultType = args.claimNext(); auto taskFunction = args.claimNext(); auto taskContext = args.claimNext(); @@ -311,6 +328,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin, IGF, flags, taskGroup, + taskPool, futureResultType, taskFunction, taskContext, substitutions); diff --git a/lib/IRGen/GenCall.cpp b/lib/IRGen/GenCall.cpp index 0cf254feb2af8..5643984dc04bd 100644 --- a/lib/IRGen/GenCall.cpp +++ b/lib/IRGen/GenCall.cpp @@ -162,6 +162,7 @@ FunctionPointerKind::getStaticAsyncContextSize(IRGenModule &IGM) const { case SpecialKind::AsyncLetGetThrowing: case SpecialKind::AsyncLetFinish: case SpecialKind::TaskGroupWaitNext: + case SpecialKind::TaskPoolWaitAll: case SpecialKind::DistributedExecuteTarget: // The current guarantee for all of these functions is the same. // See TaskFutureWaitAsyncContext. @@ -4128,6 +4129,7 @@ llvm::Value *irgen::emitTaskCreate( IRGenFunction &IGF, llvm::Value *flags, llvm::Value *taskGroup, + llvm::Value *taskPool, llvm::Value *futureResultType, llvm::Value *taskFunction, llvm::Value *localContextInfo, @@ -4136,7 +4138,9 @@ llvm::Value *irgen::emitTaskCreate( // it. llvm::Value *taskOptions = llvm::ConstantInt::get( IGF.IGM.SwiftTaskOptionRecordPtrTy, 0); + if (taskGroup) { + assert(!taskPool); TaskOptionRecordFlags optionsFlags(TaskOptionRecordKind::TaskGroup); llvm::Value *optionsFlagsVal = llvm::ConstantInt::get( IGF.IGM.SizeTy, optionsFlags.getOpaqueValue()); @@ -4158,6 +4162,29 @@ llvm::Value *irgen::emitTaskCreate( optionsRecord.getAddress(), IGF.IGM.SwiftTaskOptionRecordPtrTy); } + if (taskPool) { + assert(!taskGroup); + TaskOptionRecordFlags optionsFlags(TaskOptionRecordKind::TaskPool); + llvm::Value *optionsFlagsVal = llvm::ConstantInt::get( + IGF.IGM.SizeTy, optionsFlags.getOpaqueValue()); + + auto optionsRecord = IGF.createAlloca( + IGF.IGM.SwiftTaskGroupTaskOptionRecordTy, Alignment(), + "task_pool_options"); + auto optionsBaseRecord = IGF.Builder.CreateStructGEP( + optionsRecord, 0, Size()); + IGF.Builder.CreateStore( + optionsFlagsVal, + IGF.Builder.CreateStructGEP(optionsBaseRecord, 0, Size())); + IGF.Builder.CreateStore( + taskOptions, IGF.Builder.CreateStructGEP(optionsBaseRecord, 1, Size())); + + IGF.Builder.CreateStore( + taskPool, IGF.Builder.CreateStructGEP(optionsRecord, 1, Size())); + taskOptions = IGF.Builder.CreateBitOrPointerCast( + optionsRecord.getAddress(), IGF.IGM.SwiftTaskOptionRecordPtrTy); + } + assert(futureResultType && "no future?!"); llvm::CallInst *result = IGF.Builder.CreateCall( IGF.IGM.getTaskCreateFunctionPointer(), diff --git a/lib/IRGen/GenCall.h b/lib/IRGen/GenCall.h index c354eb7e2b31a..d522e803092b3 100644 --- a/lib/IRGen/GenCall.h +++ b/lib/IRGen/GenCall.h @@ -223,6 +223,7 @@ namespace irgen { IRGenFunction &IGF, llvm::Value *flags, llvm::Value *taskGroup, + llvm::Value *taskPool, llvm::Value *futureResultType, llvm::Value *taskFunction, llvm::Value *localContextInfo, diff --git a/lib/IRGen/GenConcurrency.cpp b/lib/IRGen/GenConcurrency.cpp index d9512ab5952c2..8ab6c414147cd 100644 --- a/lib/IRGen/GenConcurrency.cpp +++ b/lib/IRGen/GenConcurrency.cpp @@ -296,6 +296,36 @@ void irgen::emitDestroyTaskGroup(IRGenFunction &IGF, llvm::Value *group) { IGF.Builder.CreateLifetimeEnd(group); } +llvm::Value *irgen::emitCreateTaskPool(IRGenFunction &IGF, + SubstitutionMap subs) { + auto ty = llvm::ArrayType::get(IGF.IGM.Int8PtrTy, NumWords_TaskPool); + auto address = IGF.createAlloca(ty, Alignment(Alignment_TaskPool)); + auto pool = 
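For context on the option-record plumbing being emitted here: every record begins with a flags word encoding its kind plus a pointer to the previous record, so the runtime receives a singly linked chain it can walk when creating the task. The following is a rough, self-contained model of that layout and walk; the kind values and field names are made up for illustration and are not the actual ABI.

#include <cstdio>

// Illustrative option-record kinds; the real kinds live in the ABI headers.
enum class Kind : unsigned { Executor = 0, TaskGroup = 1, TaskPool = 2 };

// Base layout: a flags word (carrying the kind) and the previous record.
struct OptionRecord {
  Kind kind;
  OptionRecord *parent;
};

// A pool option record is the base plus the pool pointer payload,
// mirroring the { base, pool } struct stored field-by-field above.
struct PoolOptionRecord {
  OptionRecord base;
  void *pool;
};

static void describeChain(const OptionRecord *record) {
  for (; record; record = record->parent) {
    switch (record->kind) {
    case Kind::TaskPool:
      std::printf("task pool option, pool = %p\n",
                  reinterpret_cast<const PoolOptionRecord *>(record)->pool);
      break;
    case Kind::TaskGroup:
      std::puts("task group option");
      break;
    case Kind::Executor:
      std::puts("executor option");
      break;
    }
  }
}

int main() {
  int poolStorage = 0; // stands in for the caller-allocated TaskPool
  PoolOptionRecord poolOption{{Kind::TaskPool, /*parent=*/nullptr},
                              &poolStorage};
  // The chain head is what would be passed as the task-creation options.
  describeChain(&poolOption.base);
  return 0;
}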
IGF.Builder.CreateBitCast(address.getAddress(), + IGF.IGM.Int8PtrTy); + IGF.Builder.CreateLifetimeStart(pool); + assert(subs.getReplacementTypes().size() == 1 && + "createTaskPool should have a type substitution"); + auto resultType = subs.getReplacementTypes()[0]->getCanonicalType(); + auto resultTypeMetadata = IGF.emitAbstractTypeMetadataRef(resultType); + + auto *call = + IGF.Builder.CreateCall(IGF.IGM.getTaskPoolInitializeFunctionPointer(), + {pool, resultTypeMetadata}); + call->setDoesNotThrow(); + call->setCallingConv(IGF.IGM.SwiftCC); + + return pool; +} + +void irgen::emitDestroyTaskPool(IRGenFunction &IGF, llvm::Value *pool) { + auto *call = IGF.Builder.CreateCall( + IGF.IGM.getTaskPoolDestroyFunctionPointer(), {pool}); + call->setDoesNotThrow(); + call->setCallingConv(IGF.IGM.SwiftCC); + + IGF.Builder.CreateLifetimeEnd(pool); +} + llvm::Function *IRGenModule::getAwaitAsyncContinuationFn() { StringRef name = "__swift_continuation_await_point"; if (llvm::GlobalValue *F = Module.getNamedValue(name)) diff --git a/lib/IRGen/GenConcurrency.h b/lib/IRGen/GenConcurrency.h index 6ee88c61c5397..60c47f28d1527 100644 --- a/lib/IRGen/GenConcurrency.h +++ b/lib/IRGen/GenConcurrency.h @@ -74,6 +74,12 @@ llvm::Value *emitCreateTaskGroup(IRGenFunction &IGF, SubstitutionMap subs); /// Emit the destroyTaskGroup builtin. void emitDestroyTaskGroup(IRGenFunction &IGF, llvm::Value *group); +/// Emit the createTaskPool builtin. +llvm::Value *emitCreateTaskPool(IRGenFunction &IGF, SubstitutionMap subs); + +/// Emit the destroyTaskPool builtin. +void emitDestroyTaskPool(IRGenFunction &IGF, llvm::Value *pool); + void emitTaskRunInline(IRGenFunction &IGF, SubstitutionMap subs, llvm::Value *result, llvm::Value *closure, llvm::Value *closureContext); diff --git a/lib/IRGen/IRGenModule.cpp b/lib/IRGen/IRGenModule.cpp index 5e2b725dfa043..89c7a4edc2980 100644 --- a/lib/IRGen/IRGenModule.cpp +++ b/lib/IRGen/IRGenModule.cpp @@ -643,6 +643,7 @@ IRGenModule::IRGenModule(IRGenerator &irgen, SwiftAsyncLetPtrTy = Int8PtrTy; // we pass it opaquely (AsyncLet*) SwiftTaskOptionRecordPtrTy = SizeTy; // Builtin.RawPointer? 
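Worth noting about emitCreateTaskPool: the pool is never heap-allocated; IRGen reserves NumWords_TaskPool pointer-sized words in the caller's frame and the runtime placement-constructs its implementation into that buffer, later tearing it down in place. A small self-contained illustration of that caller-owned-storage pattern follows, with invented sizes and type names.

#include <cassert>
#include <cstdio>
#include <new>

// Opaque, caller-visible storage: a handful of pointer-sized words,
// analogous to the fixed-size ABI type the alloca reserves.
struct OpaqueStorage {
  void *reserved[8];
};

// The "implementation" type the runtime constructs into that storage.
class Impl {
public:
  explicit Impl(int capacity) : capacity(capacity) {}
  int capacity;
};

// The implementation must fit and be suitably aligned, which the real
// code checks with a static_assert against the ABI type.
static_assert(sizeof(Impl) <= sizeof(OpaqueStorage) &&
              alignof(Impl) <= alignof(OpaqueStorage),
              "Impl does not fit in its opaque storage");

// Mirrors the initialize entry point: placement-construct into caller memory.
void initialize(OpaqueStorage *storage, int capacity) {
  ::new (storage) Impl(capacity);
}

// Mirrors the destroy entry point: run the destructor, do not free memory;
// the caller owns the bytes (a stack slot in the emitted code).
void destroy(OpaqueStorage *storage) {
  reinterpret_cast<Impl *>(storage)->~Impl();
}

int main() {
  OpaqueStorage slot;            // the "alloca" in the caller's frame
  initialize(&slot, 4);
  std::printf("capacity = %d\n", reinterpret_cast<Impl *>(&slot)->capacity);
  destroy(&slot);
  return 0;
}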
that we get as (TaskOptionRecord*) SwiftTaskGroupPtrTy = Int8PtrTy; // we pass it opaquely (TaskGroup*) + SwiftTaskPoolPtrTy = Int8PtrTy; // we pass it opaquely (TaskPool*) SwiftTaskOptionRecordTy = createStructType(*this, "swift.task_option", { SizeTy, // Flags SwiftTaskOptionRecordPtrTy, // Parent @@ -652,6 +653,11 @@ IRGenModule::IRGenModule(IRGenerator &irgen, SwiftTaskOptionRecordTy, // Base option record SwiftTaskGroupPtrTy, // Task group }); + SwiftTaskGroupTaskOptionRecordTy = createStructType( + *this, "swift.task_pool_task_option", { + SwiftTaskOptionRecordTy, // Base option record + SwiftTaskPoolPtrTy, // Task pool + }); ExecutorFirstTy = SizeTy; ExecutorSecondTy = SizeTy; SwiftExecutorTy = createStructType(*this, "swift.executor", { diff --git a/lib/IRGen/IRGenModule.h b/lib/IRGen/IRGenModule.h index 16c3d87e07d1e..a6b500b832e43 100644 --- a/lib/IRGen/IRGenModule.h +++ b/lib/IRGen/IRGenModule.h @@ -761,8 +761,10 @@ class IRGenModule { llvm::PointerType *SwiftAsyncLetPtrTy; llvm::IntegerType *SwiftTaskOptionRecordPtrTy; llvm::PointerType *SwiftTaskGroupPtrTy; + llvm::PointerType *SwiftTaskPoolPtrTy; llvm::StructType *SwiftTaskOptionRecordTy; llvm::StructType *SwiftTaskGroupTaskOptionRecordTy; + llvm::StructType *SwiftTaskPoolTaskOptionRecordTy; llvm::PointerType *SwiftJobPtrTy; llvm::IntegerType *ExecutorFirstTy; llvm::IntegerType *ExecutorSecondTy; diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp index 5004190438291..02b3b5d4e2e8a 100644 --- a/lib/IRGen/IRGenSIL.cpp +++ b/lib/IRGen/IRGenSIL.cpp @@ -2653,6 +2653,9 @@ FunctionPointer::Kind irgen::classifyFunctionPointerKind(SILFunction *fn) { if (name.equals("swift_taskGroup_wait_next_throwing")) return SpecialKind::TaskGroupWaitNext; + if (name.equals("swift_taskPool_waitAll")) + return SpecialKind::TaskPoolWaitAll; + if (name.equals("swift_distributed_execute_target")) return SpecialKind::DistributedExecuteTarget; } diff --git a/lib/SIL/IR/OperandOwnership.cpp b/lib/SIL/IR/OperandOwnership.cpp index 2e20a0ea459f8..ad9004e7ff004 100644 --- a/lib/SIL/IR/OperandOwnership.cpp +++ b/lib/SIL/IR/OperandOwnership.cpp @@ -822,12 +822,15 @@ BUILTIN_OPERAND_OWNERSHIP(DestroyingConsume, EndAsyncLet) BUILTIN_OPERAND_OWNERSHIP(DestroyingConsume, StartAsyncLetWithLocalBuffer) BUILTIN_OPERAND_OWNERSHIP(DestroyingConsume, EndAsyncLetLifetime) BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, CreateTaskGroup) -BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, DestroyTaskGroup) +BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, DestroyTaskGroup) // TODO: should this be destroying consume? +BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, CreateTaskPool) +BUILTIN_OPERAND_OWNERSHIP(InstantaneousUse, DestroyTaskPool) // TODO: should this be destroying consume? BUILTIN_OPERAND_OWNERSHIP(ForwardingConsume, COWBufferForReading) const int PARAMETER_INDEX_CREATE_ASYNC_TASK_FUTURE_FUNCTION = 2; const int PARAMETER_INDEX_CREATE_ASYNC_TASK_GROUP_FUTURE_FUNCTION = 3; +const int PARAMETER_INDEX_CREATE_ASYNC_TASK_POOL_FUTURE_FUNCTION = 3; OperandOwnership OperandOwnershipBuiltinClassifier::visitCreateAsyncTask(BuiltinInst *bi, @@ -857,6 +860,20 @@ OperandOwnershipBuiltinClassifier::visitCreateAsyncTaskInGroup(BuiltinInst *bi, return OperandOwnership::InteriorPointer; } +OperandOwnership +OperandOwnershipBuiltinClassifier::visitCreateAsyncTaskInPool(BuiltinInst *bi, + StringRef attr) { + // The function operand is consumed by the new task. 
+ if (&op == &bi->getOperandRef(PARAMETER_INDEX_CREATE_ASYNC_TASK_POOL_FUTURE_FUNCTION)) + return OperandOwnership::DestroyingConsume; + + // FIXME: These are considered InteriorPointer because they may propagate a + // pointer into a borrowed values. If they do not propagate an interior pointer, + // then they should be InstantaneousUse instead and should not require a + // guaranteed value. + return OperandOwnership::InteriorPointer; +} + OperandOwnership OperandOwnershipBuiltinClassifier:: visitResumeNonThrowingContinuationReturning(BuiltinInst *bi, StringRef attr) { // The value operand is consumed. diff --git a/lib/SIL/IR/ValueOwnership.cpp b/lib/SIL/IR/ValueOwnership.cpp index a6bc96325fe32..229fdfa7af114 100644 --- a/lib/SIL/IR/ValueOwnership.cpp +++ b/lib/SIL/IR/ValueOwnership.cpp @@ -537,6 +537,7 @@ CONSTANT_OWNERSHIP_BUILTIN(None, GetCurrentAsyncTask) CONSTANT_OWNERSHIP_BUILTIN(None, CancelAsyncTask) CONSTANT_OWNERSHIP_BUILTIN(Owned, CreateAsyncTask) CONSTANT_OWNERSHIP_BUILTIN(Owned, CreateAsyncTaskInGroup) +CONSTANT_OWNERSHIP_BUILTIN(Owned, CreateAsyncTaskInPool) CONSTANT_OWNERSHIP_BUILTIN(None, ConvertTaskToJob) CONSTANT_OWNERSHIP_BUILTIN(None, InitializeDefaultActor) CONSTANT_OWNERSHIP_BUILTIN(None, DestroyDefaultActor) @@ -557,6 +558,8 @@ CONSTANT_OWNERSHIP_BUILTIN(None, StartAsyncLetWithLocalBuffer) CONSTANT_OWNERSHIP_BUILTIN(None, EndAsyncLetLifetime) CONSTANT_OWNERSHIP_BUILTIN(None, CreateTaskGroup) CONSTANT_OWNERSHIP_BUILTIN(None, DestroyTaskGroup) +CONSTANT_OWNERSHIP_BUILTIN(None, CreateTaskPool) +CONSTANT_OWNERSHIP_BUILTIN(None, DestroyTaskPool) CONSTANT_OWNERSHIP_BUILTIN(None, TaskRunInline) CONSTANT_OWNERSHIP_BUILTIN(None, Copy) diff --git a/lib/SIL/Utils/MemAccessUtils.cpp b/lib/SIL/Utils/MemAccessUtils.cpp index a8f30a8af5c32..2d2da219d9ba0 100644 --- a/lib/SIL/Utils/MemAccessUtils.cpp +++ b/lib/SIL/Utils/MemAccessUtils.cpp @@ -2525,6 +2525,7 @@ static void visitBuiltinAddress(BuiltinInst *builtin, case BuiltinValueKind::CancelAsyncTask: case BuiltinValueKind::CreateAsyncTask: case BuiltinValueKind::CreateAsyncTaskInGroup: + case BuiltinValueKind::CreateAsyncTaskInPool: case BuiltinValueKind::AutoDiffCreateLinearMapContext: case BuiltinValueKind::AutoDiffAllocateSubcontext: case BuiltinValueKind::InitializeDefaultActor: @@ -2537,6 +2538,8 @@ static void visitBuiltinAddress(BuiltinInst *builtin, case BuiltinValueKind::EndAsyncLetLifetime: case BuiltinValueKind::CreateTaskGroup: case BuiltinValueKind::DestroyTaskGroup: + case BuiltinValueKind::CreateTaskPool: + case BuiltinValueKind::DestroyTaskPool: return; // General memory access to a pointer in first operand position. diff --git a/lib/SILGen/SILGenBuiltin.cpp b/lib/SILGen/SILGenBuiltin.cpp index 249a8a2e3794b..16c208d0a4ea3 100644 --- a/lib/SILGen/SILGenBuiltin.cpp +++ b/lib/SILGen/SILGenBuiltin.cpp @@ -1616,6 +1616,45 @@ static ManagedValue emitBuiltinCreateAsyncTaskInGroup( return SGF.emitManagedRValueWithCleanup(apply); } +// TODO(ktoso): deduplicate with group, just assume the future is Void? +// Emit SIL for the named builtin: createAsyncTaskInPool. +static ManagedValue emitBuiltinCreateAsyncTaskInPool( + SILGenFunction &SGF, SILLocation loc, SubstitutionMap subs, + ArrayRef args, SGFContext C) { + ASTContext &ctx = SGF.getASTContext(); + auto flags = args[0].forward(SGF); + auto pool = args[1].borrow(SGF, loc).forward(SGF); + + // Form the metatype of the result type. 
+ CanType futureResultType = + Type(MetatypeType::get(GenericTypeParamType::get(/*isParameterPack*/ false, + /*depth*/ 0, /*index*/ 0, + SGF.getASTContext()), + MetatypeRepresentation::Thick)) + .subst(subs) + ->getCanonicalType(); + CanType anyTypeType = ExistentialMetatypeType::get( + ProtocolCompositionType::get(ctx, { }, false))->getCanonicalType(); + auto &anyTypeTL = SGF.getTypeLowering(anyTypeType); + auto &futureResultTL = SGF.getTypeLowering(futureResultType); + auto futureResultMetadata = SGF.emitExistentialErasure( + loc, futureResultType, futureResultTL, anyTypeTL, { }, C, + [&](SGFContext C) -> ManagedValue { + return ManagedValue::forTrivialObjectRValue( + SGF.B.createMetatype(loc, SGF.getLoweredType(futureResultType))); + }).borrow(SGF, loc).forward(SGF); + + auto function = emitFunctionArgumentForAsyncTaskEntryPoint(SGF, loc, args[2], + futureResultType); + auto apply = SGF.B.createBuiltin( + loc, + ctx.getIdentifier( + getBuiltinName(BuiltinValueKind::CreateAsyncTaskInPool)), + SGF.getLoweredType(getAsyncTaskAndContextType(ctx)), subs, + { flags, pool, futureResultMetadata, function.forward(SGF) }); + return SGF.emitManagedRValueWithCleanup(apply); +} + // Shared implementation of withUnsafeContinuation and // withUnsafe[Throwing]Continuation. static ManagedValue emitBuiltinWithUnsafeContinuation( diff --git a/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp b/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp index 9c85e8e9fa962..b1a364998c323 100644 --- a/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp +++ b/lib/SILOptimizer/Transforms/AccessEnforcementReleaseSinking.cpp @@ -148,6 +148,8 @@ static bool isBarrier(SILInstruction *inst) { case BuiltinValueKind::EndAsyncLetLifetime: case BuiltinValueKind::CreateTaskGroup: case BuiltinValueKind::DestroyTaskGroup: + case BuiltinValueKind::CreateTaskPool: + case BuiltinValueKind::DestroyTaskPool: case BuiltinValueKind::StackAlloc: case BuiltinValueKind::StackDealloc: case BuiltinValueKind::AssumeAlignment: @@ -179,6 +181,7 @@ static bool isBarrier(SILInstruction *inst) { case BuiltinValueKind::StartAsyncLet: case BuiltinValueKind::CreateAsyncTask: case BuiltinValueKind::CreateAsyncTaskInGroup: + case BuiltinValueKind::CreateAsyncTaskInPool: case BuiltinValueKind::TaskRunInline: case BuiltinValueKind::StartAsyncLetWithLocalBuffer: case BuiltinValueKind::ConvertTaskToJob: diff --git a/stdlib/public/BackDeployConcurrency/CMakeLists.txt b/stdlib/public/BackDeployConcurrency/CMakeLists.txt index 6af9dbd0017f8..663bef5965e4a 100644 --- a/stdlib/public/BackDeployConcurrency/CMakeLists.txt +++ b/stdlib/public/BackDeployConcurrency/CMakeLists.txt @@ -115,6 +115,7 @@ add_swift_target_library(swift_Concurrency ${SWIFT_STDLIB_LIBRARY_BUILD_TYPES} I TaskStatus.cpp TaskGroup.cpp TaskGroup.swift + TaskPool.swift TaskLocal.cpp TaskLocal.swift TaskSleep.swift diff --git a/stdlib/public/BackDeployConcurrency/CompatibilityOverrideConcurrency.def b/stdlib/public/BackDeployConcurrency/CompatibilityOverrideConcurrency.def index c194db12be8b6..c2af899afe9fa 100644 --- a/stdlib/public/BackDeployConcurrency/CompatibilityOverrideConcurrency.def +++ b/stdlib/public/BackDeployConcurrency/CompatibilityOverrideConcurrency.def @@ -45,6 +45,7 @@ # define OVERRIDE_TASK OVERRIDE # define OVERRIDE_ASYNC_LET OVERRIDE # define OVERRIDE_TASK_GROUP OVERRIDE +# define OVERRIDE_TASK_POOL OVERRIDE # define OVERRIDE_TASK_LOCAL OVERRIDE # define OVERRIDE_TASK_STATUS OVERRIDE #else @@ -60,6 +61,9 @@ # ifndef OVERRIDE_TASK_GROUP 
# define OVERRIDE_TASK_GROUP(...)
# endif
+# ifndef OVERRIDE_TASK_POOL
+# define OVERRIDE_TASK_POOL(...)
+# endif
 # ifndef OVERRIDE_TASK_LOCAL
 # define OVERRIDE_TASK_LOCAL(...)
 # endif
@@ -278,6 +282,47 @@ OVERRIDE_TASK_GROUP(taskGroup_addPending, bool,
                     swift::, (TaskGroup *group, bool unconditionally),
                     (group, unconditionally))

+OVERRIDE_TASK_POOL(taskPool_initialize, void,
+                   SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
+                   swift::, (TaskPool *pool, const Metadata *Void), (pool, Void))
+
+OVERRIDE_TASK_STATUS(taskPool_attachChild, void,
+                     SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
+                     swift::, (TaskPool *pool, AsyncTask *child),
+                     (pool, child))
+
+OVERRIDE_TASK_POOL(taskPool_destroy, void,
+                   SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
+                   swift::, (TaskPool *pool, const Metadata *T), (pool, T))
+
+OVERRIDE_TASK_POOL(taskPool_wait_next_throwing, void,
+                   SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swiftasync),
+                   swift::,
+                   (OpaqueValue *resultPointer,
+                    SWIFT_ASYNC_CONTEXT AsyncContext *callerContext,
+                    TaskPool *_pool,
+                    ThrowingTaskFutureWaitContinuationFunction *resumeFn,
+                    AsyncContext *callContext),
+                   (resultPointer, callerContext, _pool, resumeFn,
+                    callContext))
+
+OVERRIDE_TASK_POOL(taskPool_isEmpty, bool,
+                   SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
+                   swift::, (TaskPool *pool, const Metadata *T), (pool, T))
+
+OVERRIDE_TASK_POOL(taskPool_isCancelled, bool,
+                   SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
+                   swift::, (TaskPool *pool, const Metadata *T), (pool, T))
+
+OVERRIDE_TASK_POOL(taskPool_cancelAll, void,
+                   SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
+                   swift::, (TaskPool *pool, const Metadata *T), (pool, T))
+
+OVERRIDE_TASK_POOL(taskPool_addPending, bool,
+                   SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
+                   swift::, (TaskPool *pool, bool unconditionally),
+                   (pool, unconditionally))
+
 OVERRIDE_TASK_LOCAL(task_reportIllegalTaskLocalBindingWithinWithTaskGroup,
                     void,
                     SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
                     swift::,
@@ -324,6 +369,10 @@ OVERRIDE_TASK_STATUS(task_hasTaskGroupStatusRecord, bool,
                      SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
                      swift::, , )

+OVERRIDE_TASK_POOL(task_hasTaskGroupStatusRecord, bool,
+                   SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
+                   swift::, , )
+
 OVERRIDE_TASK_STATUS(task_attachChild, ChildTaskStatusRecord *,
                      SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift),
                      swift::, (AsyncTask *child), (child))
diff --git a/stdlib/public/BackDeployConcurrency/ConcurrencyRuntime.h b/stdlib/public/BackDeployConcurrency/ConcurrencyRuntime.h
index ba6b78fa053a7..8d91c14907857 100644
--- a/stdlib/public/BackDeployConcurrency/ConcurrencyRuntime.h
+++ b/stdlib/public/BackDeployConcurrency/ConcurrencyRuntime.h
@@ -19,6 +19,7 @@
 #include "Task.h"
 #include "TaskGroup.h"
+#include "TaskPool.h"
 #include "AsyncLet.h"
 #include "TaskStatus.h"
@@ -256,6 +257,117 @@ bool swift_taskGroup_isCancelled(TaskGroup *group);
 SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
 bool swift_taskGroup_isEmpty(TaskGroup *group);
+/// Wait until all pending tasks of a task pool have completed.
+///
+/// This can be called from any thread.
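These OVERRIDE_TASK_POOL entries follow the .def file's X-macro convention: each includer either defines the macro to expand into declarations, definitions, or override thunks, or relies on an empty fallback, which is why the #else branch above needs a matching empty OVERRIDE_TASK_POOL definition. A stripped-down, self-contained illustration of that pattern (with an invented entry list inlined as a macro):

#include <cstdio>

// Stand-in for the .def file's contents: one X-macro invocation per entry.
#define TOY_ENTRIES        \
  TOY_OVERRIDE(initialize) \
  TOY_OVERRIDE(cancelAll)  \
  TOY_OVERRIDE(waitAll)

// Client 1: expand each entry into a forward declaration.
#define TOY_OVERRIDE(name) void toy_##name();
TOY_ENTRIES
#undef TOY_OVERRIDE

// Client 2: expand each entry into a definition that just logs its name.
#define TOY_OVERRIDE(name) void toy_##name() { std::puts(#name); }
TOY_ENTRIES
#undef TOY_OVERRIDE

// Client 3: a client that does not care about these entries simply provides
// an empty default, exactly like the "#ifndef OVERRIDE_TASK_POOL /
// #define OVERRIDE_TASK_POOL(...)" fallback in the real file.
#ifndef TOY_OVERRIDE
#define TOY_OVERRIDE(...)
#endif
TOY_ENTRIES
#undef TOY_OVERRIDE

int main() {
  toy_initialize();
  toy_cancelAll();
  toy_waitAll();
  return 0;
}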
Its Swift signature is +/// +/// \code +/// func swift_taskPool_waitAll( +/// waitingTask: Builtin.NativeObject, // current task +/// pool: Builtin.RawPointer +/// ) async -> T +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) +SWIFT_CC(swiftasync) +void swift_taskPool_waitAll( + OpaqueValue *resultPointer, SWIFT_ASYNC_CONTEXT AsyncContext *callerContext, + TaskPool *pool, ThrowingTaskFutureWaitContinuationFunction *resumeFn, + AsyncContext *callContext); + +/// Initialize a `TaskGroup` in the passed `group` memory location. +/// The caller is responsible for retaining and managing the group's lifecycle. +/// +/// Its Swift signature is +/// +/// \code +/// func swift_taskPool_initialize(group: Builtin.RawPointer) +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_taskPool_initialize(TaskPool *pool, const Metadata *Void); + +/// Attach a child task to the parent task's task group record. +/// +/// This function MUST be called from the AsyncTask running the task group. +/// +/// Since the group (or rather, its record) is inserted in the parent task at +/// creation we do not need the parent task here, the group already is attached +/// to it. +/// Its Swift signature is +/// +/// \code +/// func swift_taskPool_attachChild( +/// group: Builtin.RawPointer, +/// child: Builtin.NativeObject +/// ) +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_taskPool_attachChild(TaskPool *pool, AsyncTask *child); + +/// Its Swift signature is +/// +/// This function MUST be called from the AsyncTask running the task group. +/// +/// \code +/// func swift_taskPool_destroy(_ group: Builtin.RawPointer) +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_taskPool_destroy(TaskPool *pool); + +/// Before starting a task group child task, inform the group that there is one +/// more 'pending' child to account for. +/// +/// This function SHOULD be called from the AsyncTask running the task group, +/// however is generally thread-safe as it only only works with the group status. +/// +/// Its Swift signature is +/// +/// \code +/// func swift_taskPool_addPending( +/// group: Builtin.RawPointer, +/// unconditionally: Bool +/// ) -> Bool +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +bool swift_taskPool_addPending(TaskPool *pool, bool unconditionally); + +/// Cancel all tasks in the group. +/// This also prevents new tasks from being added. +/// +/// This can be called from any thread. +/// +/// Its Swift signature is +/// +/// \code +/// func swift_taskPool_cancelAll(group: Builtin.RawPointer) +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +void swift_taskPool_cancelAll(TaskPool *pool); + +/// Check ONLY if the group was explicitly cancelled, e.g. by `cancelAll`. +/// +/// This check DOES NOT take into account the task in which the group is running +/// being cancelled or not. +/// +/// This can be called from any thread. Its Swift signature is +/// +/// \code +/// func swift_taskPool_isCancelled(group: Builtin.RawPointer) +/// \endcode +SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift) +bool swift_taskPool_isCancelled(TaskPool *pool); + +/// Check the readyQueue of a task group, return true if it has no pending tasks. +/// +/// This can be called from any thread. 
Its Swift signature is
+///
+/// \code
+/// func swift_taskPool_isEmpty(
+///     _ group: Builtin.RawPointer
+/// ) -> Bool
+/// \endcode
+SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
+bool swift_taskPool_isEmpty(TaskPool *pool);
+
 /// DEPRECATED. swift_asyncLet_begin is used instead.
 /// Its Swift signature is
 ///
diff --git a/stdlib/public/BackDeployConcurrency/Task.cpp b/stdlib/public/BackDeployConcurrency/Task.cpp
index b25f300b109d4..0d433c4a14ae0 100644
--- a/stdlib/public/BackDeployConcurrency/Task.cpp
+++ b/stdlib/public/BackDeployConcurrency/Task.cpp
@@ -148,6 +148,7 @@ FutureFragment::Status AsyncTask::waitFuture(AsyncTask *waitingTask,
 }

 void NullaryContinuationJob::process(Job *_job) {
+  fprintf(stderr, "[%s:%d](%s) process job!\n", __FILE_NAME__, __LINE__, __FUNCTION__);
   auto *job = cast<NullaryContinuationJob>(_job);

   auto *task = job->Task;
@@ -162,6 +163,8 @@ void NullaryContinuationJob::process(Job *_job) {
 }

 void AsyncTask::completeFuture(AsyncContext *context) {
+  fprintf(stderr, "[%s:%d](%s) complete future!\n", __FILE_NAME__, __LINE__, __FUNCTION__);
+
   using Status = FutureFragment::Status;
   using WaitQueueItem = FutureFragment::WaitQueueItem;
   SWIFT_TASK_DEBUG_LOG("complete future = %p", this);
@@ -189,12 +192,16 @@ void AsyncTask::completeFuture(AsyncContext *context) {
       newQueueHead, std::memory_order_acquire);
   assert(queueHead.getStatus() == Status::Executing);

-  // If this is task group child, notify the parent group about the completion.
+  // If this is a task group (or task pool) child, notify the parent about the completion.
   if (hasGroupChildFragment()) {
     // then we must offer into the parent group that we completed,
     // so it may `next()` poll completed child tasks in completion order.
     auto group = groupChildFragment()->getGroup();
     group->offer(this, context);
+  } else if (hasPoolChildFragment()) {
+    // then we must offer into the parent pool that we completed.
+    auto pool = poolChildFragment()->getPool();
+    pool->offer(this, context);
   }

   // Schedule every waiting task on the executor.
@@ -489,7 +496,7 @@ static AsyncTaskAndContext swift_task_create_commonImpl(
     // of in a FutureFragment.
     hasAsyncLetResultBuffer = true;
     assert(asyncLet && "Missing async let storage");
-
+
     jobFlags.task_setIsAsyncLetTask(true);
     jobFlags.task_setIsChildTask(true);
     break;
@@ -564,14 +571,14 @@ static AsyncTaskAndContext swift_task_create_commonImpl(
   void *allocation = nullptr;
   if (asyncLet) {
     assert(parent);
-
+
     // If there isn't enough room in the fixed async let allocation to
     // set up the initial context, then we'll have to allocate more space
     // from the parent.
     if (asyncLet->getSizeOfPreallocatedSpace() < amountToAllocate) {
       hasAsyncLetResultBuffer = false;
     }
-
+
     // DEPRECATED. This is separated from the above condition because we
     // also have to handle an older async let ABI that did not provide
     // space for the initial slab in the compiler-generated preallocation.
@@ -651,7 +658,7 @@ static AsyncTaskAndContext swift_task_create_commonImpl(
     auto groupChildFragment = task->groupChildFragment();
     new (groupChildFragment) AsyncTask::GroupChildFragment(group);
   }
-
+
   // Initialize the future fragment if applicable.
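The completeFuture change above dispatches a child's completion to whichever parent container it belongs to, depending on which child fragment the task carries. A toy model of that dispatch, with plain structs standing in for the task, group, and pool types:

#include <cstdio>

// Toy containers a child can report its completion to.
struct Group { void offer(int id) { std::printf("group got child %d\n", id); } };
struct Pool  { void offer(int id) { std::printf("pool got child %d\n", id); } };

// Toy child task: at most one of the two "fragments" is present,
// mirroring hasGroupChildFragment() / hasPoolChildFragment().
struct Child {
  int id;
  Group *group = nullptr; // set when the child belongs to a task group
  Pool  *pool  = nullptr; // set when the child belongs to a task pool

  // Mirrors the dispatch in completeFuture: notify whichever parent
  // container this child was created in, if any.
  void complete() {
    if (group)
      group->offer(id);
    else if (pool)
      pool->offer(id);
  }
};

int main() {
  Group g;
  Pool p;
  Child a{1, &g, nullptr};
  Child b{2, nullptr, &p};
  a.complete(); // -> group got child 1
  b.complete(); // -> pool got child 2
  return 0;
}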
if (futureResultType) { assert(task->isFuture()); diff --git a/stdlib/public/BackDeployConcurrency/Task.h b/stdlib/public/BackDeployConcurrency/Task.h index 0e94ae08f5ed3..b8199f6512a0d 100644 --- a/stdlib/public/BackDeployConcurrency/Task.h +++ b/stdlib/public/BackDeployConcurrency/Task.h @@ -37,6 +37,7 @@ struct SwiftError; class TaskStatusRecord; class TaskOptionRecord; class TaskGroup; +class TaskPool; extern FullMetadata jobHeapMetadata; diff --git a/stdlib/public/BackDeployConcurrency/TaskLocal.h b/stdlib/public/BackDeployConcurrency/TaskLocal.h index cbe17932d6236..87cabf8577453 100644 --- a/stdlib/public/BackDeployConcurrency/TaskLocal.h +++ b/stdlib/public/BackDeployConcurrency/TaskLocal.h @@ -27,6 +27,7 @@ struct OpaqueValue; struct SwiftError; class TaskStatusRecord; class TaskGroup; +class TaskPool; // ==== Task Locals Values --------------------------------------------------- diff --git a/stdlib/public/BackDeployConcurrency/TaskPool.cpp b/stdlib/public/BackDeployConcurrency/TaskPool.cpp new file mode 100644 index 0000000000000..644224c21de3e --- /dev/null +++ b/stdlib/public/BackDeployConcurrency/TaskPool.cpp @@ -0,0 +1,934 @@ +//===--- TaskPool.cpp - Task Pools --------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Object management for child tasks that are children of a task group. +// +//===----------------------------------------------------------------------===// + +#include "../CompatibilityOverride/CompatibilityOverride.h" + +#include "Debug.h" +#include "TaskPrivate.h" +#include "bitset" +#include "string" +#include "swift/ABI/HeapObject.h" +#include "swift/ABI/Metadata.h" +#include "swift/ABI/Task.h" +#include "swift/ABI/TaskPool.h" +#include "swift/Basic/RelativePointer.h" +#include "swift/Basic/STLExtras.h" +#include "swift/Runtime/Concurrency.h" +#include "swift/Runtime/Config.h" +#include "swift/Runtime/HeapObject.h" +#include "swift/Threading/Mutex.h" +#include +#include + +#if !SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY +#include +#endif + +#include +#if SWIFT_CONCURRENCY_ENABLE_DISPATCH +#include +#endif + +#if !defined(_WIN32) && !defined(__wasi__) && __has_include() +#include +#endif + +using namespace swift; + +/******************************************************************************/ +/*************************** TASK POOL ***************************************/ +/******************************************************************************/ + +using FutureFragment = AsyncTask::FutureFragment; + +namespace { +class TaskStatusRecord; + +class TaskPoolImpl: public TaskPoolTaskStatusRecord { +public: + /// Describes the status of the group. + enum class ReadyStatus : uintptr_t { + /// The task group is empty, no tasks are pending. + /// Return immediately, there is no point in suspending. + /// + /// The storage is not accessible. + Empty = 0b00, + + // not used: 0b01; same value as the PollStatus MustWait, + // which does not make sense for the ReadyStatus + + /// The future has completed with result (of type \c resultType). 
+ Success = 0b10, + + /// The future has completed by throwing an error (an \c Error + /// existential). + Error = 0b11, + }; + + enum class PollStatus : uintptr_t { + /// The group is known to be empty and we can immediately return nil. + Empty = 0b00, + + /// The task has been enqueued to the groups wait queue. + MustWait = 0b01, + + /// The task has completed with result (of type \c resultType). + Success = 0b10, + + /// The task has completed by throwing an error (an \c Error existential). + Error = 0b11, + }; + + /// The result of waiting on the TaskPoolImpl. + struct PollResult { + PollStatus status; // TODO: pack it into storage pointer or not worth it? + +// /// Storage for the result of the future. +// /// +// /// When the future completed normally, this is a pointer to the storage +// /// of the result value, which lives inside the future task itself. +// /// +// /// When the future completed by throwing an error, this is the error +// /// object itself. +// OpaqueValue *storage; +// +// const Metadata *voidType; + +// /// The completed task, if necessary to keep alive until consumed by next(). +// /// +// /// # Important: swift_release +// /// If if a task is returned here, the task MUST be swift_released +// /// once we are done with it, to balance out the retain made before +// /// when the task was enqueued into the ready queue to keep it alive +// /// until a next() call eventually picks it up. +// AsyncTask *retainedTask; + +// bool isStorageAccessible() { +// return status == PollStatus::Success || +// status == PollStatus::Error || +// status == PollStatus::Empty; +// } + + static PollResult get(AsyncTask *asyncTask, bool hadErrorResult) { + // A TaskPool task is always Void, so we don't even have to collect the result from its future fragment. + return PollResult{ + /*status*/ hadErrorResult ? + PollStatus::Error : + PollStatus::Success +// , +// /*storage*/ hadErrorResult ? +// reinterpret_cast(fragment->getError()) : +// fragment->getStoragePtr(), +// /*voidType*/fragment->getResultType(), +// /*task*/ asyncTask + }; + } + }; + + /// An item within the pending queue. + struct PendingQueueItem { + AsyncTask * const storage; + + AsyncTask *getTask() const { + return storage; + } + + static PendingQueueItem get(AsyncTask *task) { + assert(task == nullptr || task->isFuture()); + return PendingQueueItem{task}; + } + }; + + struct PoolStatus { + static const uint64_t cancelled = 0b1000000000000000000000000000000000000000000000000000000000000000; + static const uint64_t waiting = 0b0100000000000000000000000000000000000000000000000000000000000000; + + // 62 bits for pending tasks counter + static const uint64_t maskPending = 0b0011111111111111111111111111111111111111111111111111111111111111; + static const uint64_t onePendingTask = 0b0000000000000000000000000000000000000000000000000000000000000001; + + uint64_t status; + + bool isCancelled() { + return (status & cancelled) > 0; + } + + bool hasWaitingTask() { + return (status & waiting) > 0; + } + + unsigned int pendingTasks() { + return (status & maskPending); + } + + bool isEmpty() { + return pendingTasks() == 0; + } + + /// Status value decrementing the Ready, Pending and Waiting counters by one. 
+ PoolStatus completingPendingWaiting() { + assert(pendingTasks() && + "can only complete waiting task when pending tasks available"); + assert(hasWaitingTask() && + "can only complete waiting task when waiting task available"); + return PoolStatus{status - waiting - onePendingTask}; + } + + PoolStatus completingWaiting() { + assert(hasWaitingTask() && + "must have waiting task to complete it"); + return PoolStatus{status - waiting}; + } + + /// Pretty prints the status, as follows: + /// PoolStatus{ P:{pending tasks} W:{waiting tasks} {binary repr} } + std::string to_string() { + std::string str; + str.append("PoolStatus{ "); + str.append("C:"); // cancelled + str.append(isCancelled() ? "y " : "n "); + str.append("W:"); // has waiting task + str.append(hasWaitingTask() ? "y " : "n "); + str.append(" P:"); // pending + str.append(std::to_string(pendingTasks())); + str.append(" " + std::bitset<64>(status).to_string()); + str.append(" }"); + return str; + } + + /// Initially there are no waiting and no pending tasks. + static const PoolStatus initial() { + return PoolStatus{0}; + }; + }; + +private: +#if SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY || SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL + // Synchronization is simple here. In a single threaded mode, all swift tasks + // run on a single thread so no coordination is needed. In a task-to-thread + // model, only the parent task which created the task group can + // + // (a) add child tasks to a group + // (b) run the child tasks + // + // So we shouldn't need to worry about coordinating between child tasks and + // parents in a task group + void lock() const {} + void unlock() const {} +#else + // TODO: move to lockless via the status atomic (make readyQueue an mpsc_queue_t) + mutable std::mutex mutex_; + + void lock() const { mutex_.lock(); } + void unlock() const { mutex_.unlock(); } +#endif + + /// Used for queue management, counting number of waiting and ready tasks + std::atomic status; + +// /// Queue containing completed tasks offered into this group. +// /// +// /// The low bits contain the status, the rest of the pointer is the +// /// AsyncTask. +// NaiveQueue readyQueue; + + /// The task currently waiting on `group.next()`. Since only the owning + /// task can ever be waiting on a group, this is just either a reference + /// to that task or null. + std::atomic waitQueue; + + const Metadata *voidType; // TODO: must be Void so just assume it + + friend class ::swift::AsyncTask; + +public: + explicit TaskPoolImpl(const Metadata *T) + : TaskPoolTaskStatusRecord(), + status(PoolStatus::initial().status), +// readyQueue(), + waitQueue(nullptr), + voidType(T) + {} + + TaskPoolTaskStatusRecord *getTaskRecord() { + return reinterpret_cast(this); + } + + /// Destroy the storage associated with the group. + void destroy(); + + bool isEmpty() { + auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; + return oldStatus.pendingTasks() == 0; + } + + bool isCancelled() { + auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; + return oldStatus.isCancelled(); + } + + /// Cancel the task group and all tasks within it. + /// + /// Returns `true` if this is the first time cancelling the group, false otherwise. + bool cancelAll(); + + PoolStatus statusCancel() { + auto old = status.fetch_or(PoolStatus::cancelled, + std::memory_order_relaxed); + return PoolStatus{old}; + } + + /// Returns *assumed* new status, including the just performed +1. 
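Since PoolStatus packs the whole pool state into one 64-bit word, it may help to see the bit manipulation in isolation: bit 63 is the cancelled flag, bit 62 marks that a task is parked in waitAll, and the low 62 bits count pending children, so a single fetch_or/fetch_add/fetch_sub updates the state atomically. A compact runnable model of that packing, restating the constants from the struct above:

#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

// Same layout as PoolStatus: [cancelled][waiting][62-bit pending count].
constexpr uint64_t kCancelled   = 1ull << 63;
constexpr uint64_t kWaiting     = 1ull << 62;
constexpr uint64_t kPendingMask = (1ull << 62) - 1;
constexpr uint64_t kOnePending  = 1;

std::atomic<uint64_t> status{0};

uint64_t pendingTasks(uint64_t s)  { return s & kPendingMask; }
bool     isCancelled(uint64_t s)   { return (s & kCancelled) != 0; }
bool     hasWaitingTask(uint64_t s){ return (s & kWaiting) != 0; }

int main() {
  // Two children start: +1 pending each, like statusAddPendingTaskRelaxed.
  status.fetch_add(kOnePending, std::memory_order_relaxed);
  status.fetch_add(kOnePending, std::memory_order_relaxed);

  // waitAll parks: set the waiting bit, like statusMarkWaitingAssumeAcquire.
  status.fetch_or(kWaiting, std::memory_order_acquire);

  // A child completes: -1 pending, like statusDecrementPendingAssumeAcquire.
  uint64_t after =
      status.fetch_sub(kOnePending, std::memory_order_acquire) - kOnePending;
  assert(pendingTasks(after) == 1 && hasWaitingTask(after));

  // cancelAll: a single OR marks the whole pool cancelled.
  status.fetch_or(kCancelled, std::memory_order_relaxed);

  after = status.fetch_sub(kOnePending, std::memory_order_acquire) - kOnePending;
  std::printf("pending=%llu waiting=%d cancelled=%d\n",
              (unsigned long long)pendingTasks(after),
              hasWaitingTask(after), isCancelled(after));
  // The last completion observes pending == 0 while the waiting bit is still
  // set, which is exactly the condition offer() uses to resume the waiter.
  return 0;
}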
+ PoolStatus statusMarkWaitingAssumeAcquire() { + auto old = status.fetch_or(PoolStatus::waiting, std::memory_order_acquire); + return PoolStatus{old | PoolStatus::waiting}; + } + + PoolStatus statusRemoveWaiting() { + auto old = status.fetch_and(~PoolStatus::waiting, + std::memory_order_release); + return PoolStatus{old}; + } + + // NOTE: the following change from a TaskGroup which adds ready; we just remove pending. + // statusAddReadyAssumeAcquire >>>> statusDecrementPendingAssumeAcquire + /// Returns *assumed* new status, including the just performed +1. + PoolStatus statusDecrementPendingAssumeAcquire() { + auto old = status.fetch_sub(PoolStatus::onePendingTask, + std::memory_order_acquire); + return PoolStatus{old - PoolStatus::onePendingTask}; + } + + PoolStatus statusRemovePendingAcquire() { + auto old = status.fetch_add(PoolStatus::onePendingTask, + std::memory_order_acquire); + return PoolStatus{old - PoolStatus::onePendingTask}; + } + + /// Add a single pending task to the status counter. + /// This is used to implement next() properly, as we need to know if there + /// are pending tasks worth suspending/waiting for or not. + /// + /// Note that the group does *not* store child tasks at all, as they are + /// stored in the `TaskPoolTaskStatusRecord` inside the current task, that + /// is currently executing the group. Here we only need the counts of + /// pending/ready tasks. + /// + /// If the `unconditionally` parameter is `true` the operation always successfully + /// adds a pending task, even if the group is cancelled. If the unconditionally + /// flag is `false`, the added pending count will be *reverted* before returning. + /// This is because we will NOT add a task to a cancelled group, unless doing + /// so unconditionally. + /// + /// Returns *assumed* new status, including the just performed +1. + PoolStatus statusAddPendingTaskRelaxed(bool unconditionally) { + auto old = status.fetch_add(PoolStatus::onePendingTask, + std::memory_order_relaxed); + auto s = PoolStatus{old + PoolStatus::onePendingTask}; + + if (!unconditionally && s.isCancelled()) { + // revert that add, it was meaningless + auto o = status.fetch_sub(PoolStatus::onePendingTask, + std::memory_order_relaxed); + s = PoolStatus{o - PoolStatus::onePendingTask}; + } + + return s; + } + + PoolStatus statusLoadRelaxed() { + return PoolStatus{status.load(std::memory_order_relaxed)}; + } + +// /// Compare-and-set old status to a status derived from the old one, +// /// by simultaneously decrementing one Pending and one Waiting tasks. +// /// +// /// This is used to atomically perform a waiting task completion. +// bool statusCompletePendingReadyWaiting(PoolStatus &old) { +// return status.compare_exchange_strong( +// old.status, old.completingPendingReadyWaiting().status, +// /*success*/ std::memory_order_relaxed, +// /*failure*/ std::memory_order_relaxed); +// } +// +// bool statusCompletePendingReady(PoolStatus &old) { +// return status.compare_exchange_strong( +// old.status, old.completingPendingReady().status, +// /*success*/ std::memory_order_relaxed, +// /*failure*/ std::memory_order_relaxed); +// } + + + /// Offer result of a task into this task pool. + /// + /// Unlike a task group, result values are never stored and we immediately + /// release the task after decrementing the `pending` count in the pool's status. + /// + /// If the TaskPool is currently "draining" tasks (i.e. its body has completed), + /// there may be a `waiting` task. 
If so, and this is the last pending task, + /// this offer will resume it, allowing the TaskPool to complete and destroy itself. + void offer(AsyncTask *completed, AsyncContext *context); + + /// A `TaskPool` is not able to wait on individual completions, + /// instead, it can only await on "all pending tasks have been processed". + /// + /// + /// If unable to complete the waiting task immediately (with an readily + /// available completed task), either returns an `PollStatus::Empty` + /// result if it is known that no pending tasks in the group, + /// or a `PollStatus::MustWait` result if there are tasks in flight + /// and the waitingTask eventually be woken up by a completion. + PollResult waitAll(AsyncTask *waitingTask); + +private: + /// Enqueue the completed task onto ready queue if there are no waiting tasks yet + PoolStatus completeTask(AsyncTask *completedTask); +}; + +} // end anonymous namespace + +/******************************************************************************/ +/************************ TASK POOL IMPLEMENTATION ***************************/ +/******************************************************************************/ + +using ReadyStatus = TaskPoolImpl::ReadyStatus; +using PollResult = TaskPoolImpl::PollResult; +using PollStatus = TaskPoolImpl::PollStatus; + +static_assert(sizeof(TaskPoolImpl) <= sizeof(TaskPool) && + alignof(TaskPoolImpl) <= alignof(TaskPool), + "TaskPoolImpl doesn't fit in TaskPool"); + +static TaskPoolImpl *asImpl(TaskPool *group) { + return reinterpret_cast(group); +} + +static TaskPool *asAbstract(TaskPoolImpl *group) { + return reinterpret_cast(group); +} + +TaskPoolTaskStatusRecord * TaskPool::getTaskRecord() { + return asImpl(this)->getTaskRecord(); +} + +// ============================================================================= +// ==== initialize ------------------------------------------------------------- + +// Initializes into the preallocated _pool an actual TaskPoolImpl. +SWIFT_CC(swift) +static void swift_taskPool_initializeImpl(TaskPool *group, const Metadata *Void) { + SWIFT_TASK_DEBUG_LOG("creating task group = %p", group); + + TaskPoolImpl *impl = ::new (group) TaskPoolImpl(Void); + auto record = impl->getTaskRecord(); + assert(impl == record && "the group IS the task record"); + + // ok, now that the group actually is initialized: attach it to the task + addStatusRecord(record, [&](ActiveTaskStatus parentStatus) { + // If the task has already been cancelled, reflect that immediately in + // the group's status. + if (parentStatus.isCancelled()) { + impl->statusCancel(); + } + return true; + }); +} + +// ============================================================================= +// ==== child task management -------------------------------------------------- + +void TaskPool::addChildTask(AsyncTask *child) { + SWIFT_TASK_DEBUG_LOG("attach child task = %p to pool = %p", child, this); + + // Add the child task to this task group. The corresponding removal + // won't happen until the parent task successfully polls for this child + // task, either synchronously in poll (if a task is available + // synchronously) or asynchronously in offer (otherwise). In either + // case, the work ends up being non-concurrent with the parent task. + + // The task status record lock is held during this operation, which + // prevents us from racing with cancellation or escalation. We don't + // need to acquire the task group lock because the child list is only + // accessed under the task status record lock. 
+ auto record = asImpl(this)->getTaskRecord(); + record->attachChild(child); +} + +void TaskPool::removeChildTask(AsyncTask *child) { + SWIFT_TASK_DEBUG_LOG("detach child task = %p from group = %p", child, this); + + auto record = asImpl(this)->getTaskRecord(); + + // The task status record lock is held during this operation, which + // prevents us from racing with cancellation or escalation. We don't + // need to acquire the task group lock because the child list is only + // accessed under the task status record lock. + record->detachChild(child); +} + +// ============================================================================= +// ==== destroy ---------------------------------------------------------------- +SWIFT_CC(swift) +static void swift_taskPool_destroyImpl(TaskPool *group) { + asImpl(group)->destroy(); +} + +void TaskPoolImpl::destroy() { + SWIFT_TASK_DEBUG_LOG("destroying task group = %p", this); + if (!this->isEmpty()) { + auto status = this->statusLoadRelaxed(); + SWIFT_TASK_DEBUG_LOG("destroying task group = %p, tasks .ready = %d, .pending = %d", + this, status.readyTasks(), status.pendingTasks()); + } + assert(this->isEmpty() && "Attempted to destroy non-empty task group!"); + + // First, remove the group from the task and deallocate the record + removeStatusRecord(getTaskRecord()); + + // No need to drain our queue here, as by the time we call destroy, + // all tasks inside the group must have been awaited on already. + // This is done in Swift's withTaskPool function explicitly. + + // destroy the group's storage + this->~TaskPoolImpl(); +} + +// ============================================================================= +// ==== offer ------------------------------------------------------------------ + +void TaskPool::offer(AsyncTask *completedTask, AsyncContext *context) { + asImpl(this)->offer(completedTask, context); +} + +bool TaskPool::isCancelled() { + return asImpl(this)->isCancelled(); +} + +//static void fillPoolNextResult(TaskFutureWaitAsyncContext *context, +// PollResult result) { +// /// Fill in the result value +// switch (result.status) { +// case PollStatus::MustWait: +// assert(false && "filling a waiting status?"); +// return; +// +// case PollStatus::Error: { +// context->fillWithError(reinterpret_cast(result.storage)); +// return; +// } +// +// case PollStatus::Success: { +// // Initialize the result as an Optional. +// const Metadata *voidType = result.voidType; +// OpaqueValue *destPtr = context->successResultPointer; +// voidType->vw_storeEnumTagSinglePayload(destPtr, 1, 1); +// return; +// } +// } +//} + +static void fillPoolNextVoidResult(TaskFutureWaitAsyncContext *context, + const Metadata *voidType, + PollResult result) { + /// Fill in the result value + switch (result.status) { + case PollStatus::MustWait: + assert(false && "filling a waiting status?"); + return; + + case PollStatus::Error: { + assert(false && "cannot have errors"); + return; + } + + case PollStatus::Success: { + // Initialize the result as an Optional. +// const Metadata *voidType = nullptr; // result.voidType; // FIXME: should be Void type + OpaqueValue *destPtr = context->successResultPointer; + // TODO: figure out a way to try to optimistically take the + // value out of the finished task's future, if there are no + // remaining references to it. + voidType->vw_initializeWithCopy(destPtr, nullptr); + voidType->vw_storeEnumTagSinglePayload(destPtr, 0, 1); + return; + } + + case PollStatus::Empty: { + // Initialize the result as a nil Optional. 
+//    const Metadata *voidType = nullptr; // result.voidType; // FIXME: should be Void type
+    OpaqueValue *destPtr = context->successResultPointer;
+    voidType->vw_storeEnumTagSinglePayload(destPtr, 1, 1);
+    return;
+  }
+  }
+}
+
+// TaskPool is locked upon entry and exit
+TaskPoolImpl::PoolStatus TaskPoolImpl::completeTask(AsyncTask *completedTask) {
+  SWIFT_TASK_DEBUG_LOG("pool does not retain tasks for their results; we're done here = %p", completedTask);
+  // DO NOT RETAIN THE TASK.
+  // We know it is Void, so we don't need to store the result;
+  // by releasing tasks eagerly we're able to keep "infinite" task pools
+  // running, whose results are never consumed.
+
+  return this->statusDecrementPendingAssumeAcquire();
+}
+
+void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) {
+  fprintf(stderr, "[%s:%d](%s) offer task = %p, pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, completedTask, this);
+  assert(completedTask);
+  assert(completedTask->isFuture());
+  assert(completedTask->hasChildFragment());
+  assert(completedTask->hasPoolChildFragment());
+  assert(completedTask->poolChildFragment()->getPool() == asAbstract(this));
+  SWIFT_TASK_DEBUG_LOG("offer task %p to pool %p", completedTask, this);
+
+  lock(); // TODO: remove fragment lock, and use status for synchronization
+
+  // Immediately decrement the pending count; we do not keep track of "ready" tasks and never store them.
+  // This is different from a task group, which has to keep the pending count and add +1 "ready" when offered to.
+  auto assumed = statusDecrementPendingAssumeAcquire();
+  SWIFT_TASK_DEBUG_LOG("pool %p, remaining pending: %d", this, assumed.pendingTasks());
+
+  auto asyncContextPrefix = reinterpret_cast<FutureAsyncContextPrefix *>(
+      reinterpret_cast<char *>(context) - sizeof(FutureAsyncContextPrefix));
+  bool hadErrorResult = false;
+  auto errorObject = asyncContextPrefix->errorResult;
+  if (errorObject) {
+    // instead, we need to enqueue this result:
+    hadErrorResult = true;
+  }
+
+  // ==== a) has waiting task.
+  // A TaskPool only has a waiting task while terminating, and that task shall only be resumed once
+  // all tasks have been processed. Only resume the waiting task if this was the last pending task.
+  if (assumed.hasWaitingTask() && assumed.pendingTasks() == 0) {
+    auto waitingTask = waitQueue.load(std::memory_order_acquire);
+    SWIFT_TASK_DEBUG_LOG("pool has waiting task = %p, complete with = %p",
+                         waitingTask, completedTask);
+    while (true) {
+      // ==== a) run waiting task directly -------------------------------------
+      assert(assumed.hasWaitingTask());
+      assert(assumed.pendingTasks() == 0 &&
+             "resuming waiting task while the pool still has pending tasks!");
+      // We are the last completed task to arrive,
+      // and since there is a task waiting we immediately claim and complete it.
+      if (waitQueue.compare_exchange_strong(
+          waitingTask, nullptr,
+          /*success*/ std::memory_order_release,
+          /*failure*/ std::memory_order_acquire)) {
+
+#if SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL
+        // In the task-to-thread model, child tasks are always actually
+        // run synchronously on the parent task's thread. For task groups
+        // specifically, this means that poll() will pick a child task
+        // that was added to the group and run it to completion as a
+        // subroutine. Therefore, when we enter offer(), we know that
+        // the parent task is waiting and we can just return to it.
+
+        // The task-to-thread logic in poll() currently expects the child
+        // task to enqueue itself instead of just filling in the result in
+        // the waiting task.
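The compare_exchange in this branch is the heart of the hand-off: the completing child atomically swaps the parked waiter out of waitQueue, so even if several completions race, only one of them resumes the parent. A minimal threaded model of that claim-once pattern follows; the Waiter struct and the four racing threads are purely illustrative.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct Waiter { const char *name; };

std::atomic<Waiter *> waitQueue{nullptr};
std::atomic<int> resumed{0};

// Mirrors the offer() fast path: only the completion that wins the
// compare_exchange gets to "resume" the waiter; everyone else sees null.
void offerCompletion() {
  Waiter *expected = waitQueue.load(std::memory_order_acquire);
  while (expected) {
    if (waitQueue.compare_exchange_strong(expected, nullptr,
                                          std::memory_order_release,
                                          std::memory_order_acquire)) {
      resumed.fetch_add(1, std::memory_order_relaxed);
      std::printf("resumed %s\n", expected->name);
      return;
    }
    // `expected` was refreshed by the failed CAS; loop only while a
    // waiter is still published.
  }
}

int main() {
  Waiter parent{"parent task"};
  waitQueue.store(&parent, std::memory_order_release);

  std::vector<std::thread> completions;
  for (int i = 0; i < 4; ++i)
    completions.emplace_back(offerCompletion);
  for (auto &t : completions)
    t.join();

  // No matter how the four completions interleave, the waiter is
  // claimed and resumed exactly once.
  std::printf("resumed %d time(s)\n", resumed.load());
  return 0;
}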
This is a little wasteful; there's no reason + // we can't just have the parent task set itself up as a waiter. + // But since it's what we're doing, we basically take the same + // path as we would if there wasn't a waiter. +// completeTask(completedTask); + unlock(); // TODO: remove fragment lock, and use status for synchronization + return; + +#else /* SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL */ + // Run the task. + auto result = PollResult::get(completedTask, hadErrorResult); + + unlock(); // TODO: remove fragment lock, and use status for synchronization + + // Remove the child from the task group's running tasks list. + // The parent task isn't currently running (we're about to wake + // it up), so we're still synchronous with it. We can safely + // acquire our parent's status record lock here (which would + // ordinarily run the risk of deadlock, since e.g. cancellation + // does a parent -> child traversal while recursively holding + // locks) because we know that the child task is completed and + // we can't be holding its locks ourselves. + _swift_taskPool_detachChild(asAbstract(this), completedTask); + + auto waitingContext = + static_cast( + waitingTask->ResumeContext); + + fillPoolNextVoidResult(waitingContext, voidType, result); + + _swift_tsan_acquire(static_cast(waitingTask)); + // TODO: allow the caller to suggest an executor + waitingTask->flagAsAndEnqueueOnExecutor(ExecutorRef::generic()); + + // completedTask will be released by the remainder of its + // completion function. + return; +#endif + } + } + llvm_unreachable("should have enqueued and returned."); + } else { + // ==== b) enqueue completion ------------------------------------------------ + // + // else, no-one was waiting (yet), so we have to instead enqueue to the message + // queue when a task polls during next() it will notice that we have a value + // ready for it, and will process it immediately without suspending. + assert(!waitQueue.load(std::memory_order_relaxed)); + + completeTask(completedTask); + unlock(); // TODO: remove fragment lock, and use status for synchronization + } + + return; +} + +SWIFT_CC(swiftasync) +static void +TASK_POOL_wait_resume_adapter(SWIFT_ASYNC_CONTEXT AsyncContext *_context) { + + auto context = static_cast(_context); + auto resumeWithError = + reinterpret_cast(context->ResumeParent); + return resumeWithError(context->Parent, context->errorResult); +} + +#ifdef __ARM_ARCH_7K__ +__attribute__((noinline)) +SWIFT_CC(swiftasync) static void workaround_function_swift_taskPool_waitAllImpl( + OpaqueValue *result, SWIFT_ASYNC_CONTEXT AsyncContext *callerContext, + TaskPool *_pool, + ThrowingTaskFutureWaitContinuationFunction resumeFunction, + AsyncContext *callContext) { + // Make sure we don't eliminate calls to this function. + asm volatile("" // Do nothing. + : // Output list, empty. + : "r"(result), "r"(callerContext), "r"(_pool) // Input list. + : // Clobber list, empty. 
+ ); + return; +} +#endif + +// ============================================================================= +// ==== group.next() implementation (wait_next and groupPoll) ------------------ + +SWIFT_CC(swiftasync) +static void swift_taskPool_waitAllImpl( + OpaqueValue *resultPointer, SWIFT_ASYNC_CONTEXT AsyncContext *callerContext, + TaskPool *_pool, + ThrowingTaskFutureWaitContinuationFunction *resumeFunction, + AsyncContext *rawContext) { + auto waitingTask = swift_task_getCurrent(); + waitingTask->ResumeTask = TASK_POOL_wait_resume_adapter; + waitingTask->ResumeContext = rawContext; + + auto context = static_cast(rawContext); + context->ResumeParent = + reinterpret_cast(resumeFunction); + context->Parent = callerContext; + context->errorResult = nullptr; + context->successResultPointer = resultPointer; + + auto pool = asImpl(_pool); + assert(pool && "swift_taskPool_waitAll was passed context without group!"); + + PollResult polled = pool->waitAll(waitingTask); + switch (polled.status) { + case PollStatus::MustWait: + SWIFT_TASK_DEBUG_LOG("poll group = %p, no ready tasks, waiting task = %p", + group, waitingTask); + // The waiting task has been queued on the channel, + // there were pending tasks so it will be woken up eventually. +#ifdef __ARM_ARCH_7K__ + return workaround_function_swift_taskPool_waitAllImpl( + resultPointer, callerContext, _pool, resumeFunction, rawContext); +#else /* __ARM_ARCH_7K__ */ + return; +#endif /* __ARM_ARCH_7K__ */ + + case PollStatus::Empty: + case PollStatus::Error: + case PollStatus::Success: + SWIFT_TASK_DEBUG_LOG("poll group = %p, task = %p, ready task available = %p", + group, waitingTask, polled.retainedTask); +// if (group->eagerlyReleaseCompleteTasks) { + fillPoolNextVoidResult(context, pool->voidType, polled); +// } else { +// fillPoolNextResult(context, polled); +// } +// if (auto completedTask = polled.retainedTask) { +// // Remove the child from the task group's running tasks list. +// _swift_taskPool_detachChild(asAbstract(group), completedTask); +// +// // Balance the retain done by completeTask. +// swift_release(completedTask); +// } + + return waitingTask->runInFullyEstablishedContext(); + } +} + +PollResult TaskPoolImpl::waitAll(AsyncTask *waitingTask) { + lock(); // TODO: remove group lock, and use status for synchronization + SWIFT_TASK_DEBUG_LOG("pool = %p, waitAll pending", this); + + PollResult result; + + // Have we suspended the task? + bool hasSuspended = false; + bool haveRunOneChildTaskInline = false; + +reevaluate_if_taskgroup_has_results:; + auto assumed = statusMarkWaitingAssumeAcquire(); + // ==== 1) bail out early if no tasks are pending ---------------------------- + if (assumed.isEmpty()) { + SWIFT_TASK_DEBUG_LOG("poll group = %p, group is empty, no pending tasks", this); + // No tasks in flight, we know no tasks were submitted before this poll + // was issued, and if we parked here we'd potentially never be woken up. + // Bail out and return `nil` from `group.next()`. + statusRemoveWaiting(); + result.status = PollStatus::Empty; + // result.voidType = this->voidType; + unlock(); // TODO: remove group lock, and use status for synchronization + return result; + } + + auto waitHead = waitQueue.load(std::memory_order_acquire); + + // ==== 3) Add to wait queue ------------------------------------------------- + _swift_tsan_release(static_cast(waitingTask)); + while (true) { + if (!hasSuspended) { + hasSuspended = true; + waitingTask->flagAsSuspended(); + } + // Put the waiting task at the beginning of the wait queue. 
+ if (waitQueue.compare_exchange_strong( + waitHead, waitingTask, + /*success*/ std::memory_order_release, + /*failure*/ std::memory_order_acquire)) { + unlock(); // TODO: remove fragment lock, and use status for synchronization +#if SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL + // The logic here is paired with the logic in TaskPoolImpl::offer. Once + // we run the + auto oldTask = _swift_task_clearCurrent(); + assert(oldTask == waitingTask); + + auto childTask = getTaskRecord()->getFirstChild(); + assert(childTask != NULL); + + SWIFT_TASK_DEBUG_LOG("[RunInline] Switching away from running %p to now running %p", oldTask, childTask); + // Run the new task on the same thread now - this should run the new task to + // completion. All swift tasks in task-to-thread model run on generic + // executor + swift_job_run(childTask, ExecutorRef::generic()); + haveRunOneChildTaskInline = true; + + SWIFT_TASK_DEBUG_LOG("[RunInline] Switching back from running %p to now running %p", childTask, oldTask); + // We are back to being the parent task and now that we've run the child + // task, we should reevaluate parent task + _swift_task_setCurrent(oldTask); + goto reevaluate_if_taskgroup_has_results; +#endif + // no ready tasks, so we must wait. + result.status = PollStatus::MustWait; + _swift_task_clearCurrent(); + return result; + } // else, try again + } +} + +// ============================================================================= +// ==== isEmpty ---------------------------------------------------------------- + +SWIFT_CC(swift) +static bool swift_taskPool_isEmptyImpl(TaskPool *pool) { + return asImpl(pool)->isEmpty(); +} + +// ============================================================================= +// ==== isCancelled ------------------------------------------------------------ + +SWIFT_CC(swift) +static bool swift_taskPool_isCancelledImpl(TaskPool *pool) { + return asImpl(pool)->isCancelled(); +} + +// ============================================================================= +// ==== cancelAll -------------------------------------------------------------- + +SWIFT_CC(swift) +static void swift_taskPool_cancelAllImpl(TaskPool *pool) { + asImpl(pool)->cancelAll(); +} + +bool TaskPoolImpl::cancelAll() { + SWIFT_TASK_DEBUG_LOG("cancel all tasks in pool = %p", this); + + // Flag the task group itself as cancelled. If this was already + // done, any existing child tasks should already have been cancelled, + // and cancellation should automatically flow to any new child tasks, + // so there's nothing else for us to do. + auto old = statusCancel(); + if (old.isCancelled()) { + return false; + } + + // Cancel all the child tasks. TaskPool is not a Sendable type, + // so cancelAll() can only be called from the owning task. This + // satisfies the precondition on cancelAllChildren(). + _swift_taskPool_cancelAllChildren(asAbstract(this)); + + return true; +} + +SWIFT_CC(swift) +static void swift_task_cancel_pool_child_tasksImpl(TaskPool *pool) { + // TaskPool is not a Sendable type, and so this operation (which is not + // currently exposed in the API) can only be called from the owning + // task. This satisfies the precondition on cancelAllChildren(). + _swift_taskPool_cancelAllChildren(pool); +} + +/// Cancel all the children of the given task group. +/// +/// The caller must guarantee that this is either called from the +/// owning task of the task group or while holding the owning task's +/// status record lock. 
+void swift::_swift_taskPool_cancelAllChildren(TaskPool *pool) { + // Because only the owning task of the task group can modify the + // child list of a task group status record, and it can only do so + // while holding the owning task's status record lock, we do not need + // any additional synchronization within this function. + for (auto childTask: pool->getTaskRecord()->children()) + swift_task_cancel(childTask); +} + +// ============================================================================= +// ==== addPending ------------------------------------------------------------- + +SWIFT_CC(swift) +static bool swift_taskPool_addPendingImpl(TaskPool *pool, bool unconditionally) { + auto assumed = asImpl(pool)->statusAddPendingTaskRelaxed(unconditionally); + SWIFT_TASK_DEBUG_LOG("add pending %s to pool %p, tasks pending = %d", + unconditionally ? "unconditionally" : "", + pool, assumed.pendingTasks()); + return !assumed.isCancelled(); +} + +#define OVERRIDE_TASK_POOL COMPATIBILITY_OVERRIDE +#include COMPATIBILITY_OVERRIDE_INCLUDE_PATH diff --git a/stdlib/public/BackDeployConcurrency/TaskPool.h b/stdlib/public/BackDeployConcurrency/TaskPool.h new file mode 100644 index 0000000000000..12d973afdabe3 --- /dev/null +++ b/stdlib/public/BackDeployConcurrency/TaskPool.h @@ -0,0 +1,61 @@ +//===--- TaskPool.h - ABI structures for task pools -00--------*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2022 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Swift ABI describing task pools. +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_ABI_TASK_POOL_BACKDEPLOYED_H +#define SWIFT_ABI_TASK_POOL_BACKDEPLOYED_H + +#include "swift/ABI/Task.h" +#include "swift/ABI/TaskStatus.h" +#include "swift/ABI/HeapObject.h" +#include "swift/Runtime/Concurrency.h" +#include "swift/Runtime/Config.h" +#include "swift/Basic/RelativePointer.h" +#include "swift/Basic/STLExtras.h" + +namespace swift { + +/// The task group is responsible for maintaining dynamically created child tasks. + class alignas(Alignment_TaskPool) TaskPool { + public: + // These constructors do not initialize the group instance, and the + // destructor does not destroy the group instance; you must call + // swift_taskGroup_{initialize,destroy} yourself. + constexpr TaskPool() + : PrivateData{} {} + + void *PrivateData[NumWords_TaskPool]; + + /// Upon a future task's completion, offer it to the task group it belongs to. + void offer(AsyncTask *completed, AsyncContext *context); + + /// Checks the cancellation status of the group. + bool isCancelled(); + + // Add a child task to the task group. Always called while holding the + // status record lock of the task group's owning task. + void addChildTask(AsyncTask *task); + + // Remove a child task from the task group. Always called while holding + // the status record lock of the task group's owning task. 
+ void removeChildTask(AsyncTask *task); + + // Provide accessor for task group's status record + TaskPoolTaskStatusRecord *getTaskRecord(); + }; + +} // end namespace swift + +#endif // SWIFT_ABI_TASK_POOL_BACKDEPLOYED_H diff --git a/stdlib/public/BackDeployConcurrency/TaskPrivate.h b/stdlib/public/BackDeployConcurrency/TaskPrivate.h index 8e581202ae6cf..f809ce16197fd 100644 --- a/stdlib/public/BackDeployConcurrency/TaskPrivate.h +++ b/stdlib/public/BackDeployConcurrency/TaskPrivate.h @@ -44,7 +44,7 @@ namespace swift { // Set to 1 to enable helpful debug spew to stderr // If this is enabled, tests with `swift_task_debug_log` requirement can run. -#if 0 +#if 1 #define SWIFT_TASK_DEBUG_LOG(fmt, ...) \ fprintf(stderr, "[%lu] [%s:%d](%s) " fmt "\n", \ (unsigned long)Thread::current()::platformThreadId(), \ @@ -56,6 +56,7 @@ namespace swift { class AsyncTask; class TaskGroup; +class TaskPool; /// Allocate task-local memory on behalf of a specific task, /// not necessarily the current one. Generally this should only be diff --git a/stdlib/public/BackDeployConcurrency/TaskStatus.cpp b/stdlib/public/BackDeployConcurrency/TaskStatus.cpp index 10195bd3d86b8..8dbc4c80d6aba 100644 --- a/stdlib/public/BackDeployConcurrency/TaskStatus.cpp +++ b/stdlib/public/BackDeployConcurrency/TaskStatus.cpp @@ -438,6 +438,33 @@ static void swift_taskGroup_attachChildImpl(TaskGroup *group, }); } +SWIFT_CC(swift) +static void swift_taskPool_attachChildImpl(TaskPool *pool, + AsyncTask *child) { + + // We are always called from the context of the parent + // + // Acquire the status record lock of parent - we want to synchronize with + // concurrent cancellation or escalation as we're adding new tasks to the + // group. + auto parent = child->childFragment()->getParent(); + assert(parent == swift_task_getCurrent()); + + withStatusRecordLock(parent, LockContext::OnTask, [&](ActiveTaskStatus &parentStatus) { + pool->addChildTask(child); + + // After getting parent's status record lock, do some sanity checks to + // see if parent task or group has state changes that need to be + // propagated to the child. + // + // This is the same logic that we would do if we were adding a child + // task status record - see also asyncLet_addImpl. Since we attach a + // child task to a TaskGroupRecord instead, we synchronize on the + // parent's task status and then update the child. + updateNewChildWithParentAndContainerState(child, parentStatus, /*group=*/nullptr, pool); + }); +} + /****************************** CANCELLATION ******************************/ /**************************************************************************/ @@ -545,6 +572,12 @@ static void performEscalationAction(TaskStatusRecord *record, swift_task_escalate(child, newPriority); return; } + case TaskStatusRecordKind::TaskPool: { + auto childRecord = cast(record); + for (AsyncTask *child: childRecord->children()) + swift_task_escalate(child, newPriority); + return; + } // Cancellation notifications can be ignore. 
case TaskStatusRecordKind::CancellationNotification: diff --git a/stdlib/public/CompatibilityOverride/CompatibilityOverrideConcurrency.def b/stdlib/public/CompatibilityOverride/CompatibilityOverrideConcurrency.def index 877170f5927e9..8588f1d538865 100644 --- a/stdlib/public/CompatibilityOverride/CompatibilityOverrideConcurrency.def +++ b/stdlib/public/CompatibilityOverride/CompatibilityOverrideConcurrency.def @@ -75,6 +75,7 @@ # define OVERRIDE_TASK OVERRIDE # define OVERRIDE_ASYNC_LET OVERRIDE # define OVERRIDE_TASK_GROUP OVERRIDE +# define OVERRIDE_TASK_POOL OVERRIDE # define OVERRIDE_TASK_LOCAL OVERRIDE # define OVERRIDE_TASK_STATUS OVERRIDE #else @@ -90,6 +91,9 @@ # ifndef OVERRIDE_TASK_GROUP # define OVERRIDE_TASK_GROUP(...) # endif +# ifndef OVERRIDE_TASK_POOL +# define OVERRIDE_TASK_POOL(...) +# endif # ifndef OVERRIDE_TASK_LOCAL # define OVERRIDE_TASK_LOCAL(...) # endif @@ -312,6 +316,45 @@ OVERRIDE_TASK_GROUP(taskGroup_addPending, bool, swift::, (TaskGroup *group, bool unconditionally), (group, unconditionally)) +OVERRIDE_TASK_POOL(taskPool_initialize, void, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool * pool, const Metadata *Void), (pool, Void)) + +OVERRIDE_TASK_STATUS(taskPool_attachChild, void, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool * pool, AsyncTask * child), + (pool, child)) + +OVERRIDE_TASK_POOL(taskPool_destroy, void, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool * pool), (pool)) + +OVERRIDE_TASK_POOL(taskPool_waitAll, void, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swiftasync), + swift::, + (OpaqueValue * resultPointer, + SWIFT_ASYNC_CONTEXT AsyncContext *callerContext, + TaskPool *_pool, + ThrowingTaskFutureWaitContinuationFunction *resumeFn, + AsyncContext *callContext), + (resultPointer, callerContext, _pool, resumeFn, callContext)) + +OVERRIDE_TASK_POOL(taskPool_isEmpty, bool, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool * pool), (pool)) + +OVERRIDE_TASK_POOL(taskPool_isCancelled, bool, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool * pool), (pool)) + +OVERRIDE_TASK_POOL(taskPool_cancelAll, void, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool * pool), (pool)) + +OVERRIDE_TASK_POOL(taskPool_addPending, bool, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool * pool, bool unconditionally), + (pool, unconditionally)) OVERRIDE_TASK_LOCAL(task_reportIllegalTaskLocalBindingWithinWithTaskGroup, void, SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), swift::, @@ -367,5 +410,6 @@ OVERRIDE_TASK_STATUS(task_getNearestDeadline, NearestTaskDeadline, #undef OVERRIDE_TASK #undef OVERRIDE_ASYNC_LET #undef OVERRIDE_TASK_GROUP +#undef OVERRIDE_TASK_POOL #undef OVERRIDE_TASK_LOCAL #undef OVERRIDE_TASK_STATUS diff --git a/stdlib/public/Concurrency/AsyncLet.cpp b/stdlib/public/Concurrency/AsyncLet.cpp index b3bfa3e1a1392..5136d9d5cbb67 100644 --- a/stdlib/public/Concurrency/AsyncLet.cpp +++ b/stdlib/public/Concurrency/AsyncLet.cpp @@ -151,7 +151,7 @@ void swift::asyncLet_addImpl(AsyncTask *task, AsyncLet *asyncLet, // current task bool addedRecord = addStatusRecord(record, [&](ActiveTaskStatus parentStatus) { - updateNewChildWithParentAndGroupState(task, parentStatus, NULL); + updateNewChildWithParentAndContainerState(task, parentStatus, NULL, NULL); return true; }); assert(addedRecord); diff --git a/stdlib/public/Concurrency/CMakeLists.txt 
b/stdlib/public/Concurrency/CMakeLists.txt index d41069b01546a..ad90119d85e6f 100644 --- a/stdlib/public/Concurrency/CMakeLists.txt +++ b/stdlib/public/Concurrency/CMakeLists.txt @@ -113,6 +113,8 @@ add_swift_target_library(swift_Concurrency ${SWIFT_STDLIB_LIBRARY_BUILD_TYPES} I TaskStatus.cpp TaskGroup.cpp TaskGroup.swift + TaskPool.cpp + TaskPool.swift TaskLocal.cpp TaskLocal.swift TaskSleep.swift diff --git a/stdlib/public/Concurrency/Task.cpp b/stdlib/public/Concurrency/Task.cpp index 11f5933bf4b69..2277f1b4c751c 100644 --- a/stdlib/public/Concurrency/Task.cpp +++ b/stdlib/public/Concurrency/Task.cpp @@ -279,6 +279,15 @@ void AsyncTask::completeFuture(AsyncContext *context) { group->offer(this, context); } + // If this is task pool child, notify the parent group about the completion. + if (hasPoolChildFragment()) { + SWIFT_TASK_DEBUG_LOG("offer task = %p to pool", this); + // then we must offer into the parent pool that we completed. + auto pool = poolChildFragment()->getPool(); + SWIFT_TASK_DEBUG_LOG("offer task = %p to pool = %p", this, pool); + pool->offer(this, context); + } + // Schedule every waiting task on the executor. auto waitingTask = queueHead.getTask(); @@ -454,11 +463,7 @@ static void completeTaskImpl(AsyncTask *task, task->completeFuture(context); } - // TODO: set something in the status? - // if (task->hasChildFragment()) { - // TODO: notify the parent somehow? - // TODO: remove this task from the child-task chain? - // } + fprintf(stderr, "[%s:%d](%s) completeTaskImpl, task = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, task); } /// The function that we put in the context of a simple task @@ -607,7 +612,10 @@ static inline bool isUnspecified(JobPriority priority) { } static inline bool taskIsStructured(JobFlags jobFlags) { - return jobFlags.task_isAsyncLetTask() || jobFlags.task_isGroupChildTask(); + return + jobFlags.task_isAsyncLetTask() || + jobFlags.task_isGroupChildTask() || + jobFlags.task_isPoolChildTask(); } static inline bool taskIsUnstructured(TaskCreateFlags createFlags, JobFlags jobFlags) { @@ -619,7 +627,8 @@ static inline bool taskIsDetached(TaskCreateFlags createFlags, JobFlags jobFlags } static std::pair amountToAllocateForHeaderAndTask( - const AsyncTask *parent, const TaskGroup *group, + const AsyncTask *parent, + const TaskGroup *group, const TaskPool *pool, const Metadata *futureResultType, size_t initialContextSize) { // Figure out the size of the header. size_t headerSize = sizeof(AsyncTask); @@ -627,8 +636,13 @@ static std::pair amountToAllocateForHeaderAndTask( headerSize += sizeof(AsyncTask::ChildFragment); } if (group) { + assert(!pool && "group child task also has pool set; those two are exclusive"); headerSize += sizeof(AsyncTask::GroupChildFragment); } + if (pool) { + assert(!group && "pool child task also has group set; those two are exclusive"); + headerSize += sizeof(AsyncTask::PoolChildFragment); + } if (futureResultType) { headerSize += FutureFragment::fragmentSize(headerSize, futureResultType); // Add the future async context prefix. @@ -670,6 +684,7 @@ static AsyncTaskAndContext swift_task_create_commonImpl( // Collect the options we know about. 
ExecutorRef executor = ExecutorRef::generic(); TaskGroup *group = nullptr; + TaskPool *pool = nullptr; AsyncLet *asyncLet = nullptr; bool hasAsyncLetResultBuffer = false; RunInlineTaskOptionRecord *runInlineOption = nullptr; @@ -681,10 +696,18 @@ static AsyncTaskAndContext swift_task_create_commonImpl( case TaskOptionRecordKind::TaskGroup: group = cast(option)->getGroup(); + fprintf(stderr, "[%s:%d](%s) make pool child task; group = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, group); assert(group && "Missing group"); jobFlags.task_setIsGroupChildTask(true); break; + case TaskOptionRecordKind::TaskPool: + pool = cast(option)->getPool(); + fprintf(stderr, "[%s:%d](%s) make pool child task; pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, pool); + assert(pool && "Missing pool"); + jobFlags.task_setIsPoolChildTask(true); + break; + case TaskOptionRecordKind::AsyncLet: asyncLet = cast(option)->getAsyncLet(); assert(asyncLet && "Missing async let storage"); @@ -715,10 +738,13 @@ static AsyncTaskAndContext swift_task_create_commonImpl( } } - // Add to the task group, if requested. - if (taskCreateFlags.addPendingGroupTaskUnconditionally()) { - assert(group && "Missing group"); - swift_taskGroup_addPending(group, /*unconditionally=*/true); + // Add to the task group or pool, if requested. + if (taskCreateFlags.addPendingGroupTaskUnconditionally()) { // TODO: rename the flag + if (group) { + swift_taskGroup_addPending(group, /*unconditionally=*/true); + } else if (pool) { + swift_taskPool_addPending(pool, /*unconditionally=*/true); + } } AsyncTask *parent = nullptr; @@ -774,7 +800,7 @@ static AsyncTaskAndContext swift_task_create_commonImpl( // priority } else { // Is a structured concurrency child task. Must have a parent. - assert((asyncLet || group) && parent); + assert((asyncLet || group || pool) && parent); SWIFT_TASK_DEBUG_LOG("Creating an structured concurrency task from %p", currentTask); if (isUnspecified(basePriority)) { @@ -793,7 +819,7 @@ static AsyncTaskAndContext swift_task_create_commonImpl( // Task will be created with escalated priority = base priority. We will // update the escalated priority with the right rules in - // updateNewChildWithParentAndGroupState when we link the child into + // updateNewChildWithParentAndContainerState when we link the child into // the parent task/task group since we'll have the right // synchronization then. } @@ -806,7 +832,7 @@ static AsyncTaskAndContext swift_task_create_commonImpl( size_t headerSize, amountToAllocate; std::tie(headerSize, amountToAllocate) = amountToAllocateForHeaderAndTask( - parent, group, futureResultType, initialContextSize); + parent, group, pool, futureResultType, initialContextSize); unsigned initialSlabSize = 512; @@ -912,6 +938,13 @@ static AsyncTaskAndContext swift_task_create_commonImpl( ::new (groupChildFragment) AsyncTask::GroupChildFragment(group); } + // Initialize the pool child fragment if applicable. + if (pool) { + fprintf(stderr, "[%s:%d](%s) add new task = %p, to pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, task, pool); + auto poolChildFragment = task->poolChildFragment(); + ::new (poolChildFragment) AsyncTask::PoolChildFragment(pool); + } + // Initialize the future fragment if applicable. 
if (futureResultType) { assert(task->isFuture()); @@ -972,10 +1005,11 @@ static AsyncTaskAndContext swift_task_create_commonImpl( initialContext->Parent = nullptr; concurrency::trace::task_create( - task, parent, group, asyncLet, + task, parent, group, pool, asyncLet, static_cast(task->Flags.getPriority()), task->Flags.task_isChildTask(), task->Flags.task_isFuture(), - task->Flags.task_isGroupChildTask(), task->Flags.task_isAsyncLetTask()); + task->Flags.task_isGroupChildTask(), task->Flags.task_isPoolChildTask(), + task->Flags.task_isAsyncLetTask()); // Attach to the group, if needed. if (group) { @@ -991,6 +1025,20 @@ static AsyncTaskAndContext swift_task_create_commonImpl( #endif } + // Attach to the pool, if needed. + if (pool) { + swift_taskPool_attachChild(pool, task); +#if SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL + // We need to take a retain here to keep the child task for the task pool + // alive. In the non-task-to-thread model, we'd always take this retain + // below since we'd enqueue the child task. But since we're not going to be + // enqueueing the child task in this model, we need to take this +1 to + // balance out the release that exists after the task pool child task + // creation + swift_retain(task); +#endif + } + // If we're supposed to copy task locals, do so now. if (taskCreateFlags.copyTaskLocals()) { swift_task_localsCopyTo(task); @@ -1054,7 +1102,8 @@ void swift::swift_task_run_inline(OpaqueValue *result, void *closureAFP, size_t candidateAllocationBytes = SWIFT_TASK_RUN_INLINE_INITIAL_CONTEXT_BYTES; size_t minimumAllocationSize = amountToAllocateForHeaderAndTask(/*parent=*/nullptr, /*group=*/nullptr, - futureResultType, closureContextSize) + /*pool=*/nullptr, futureResultType, + closureContextSize) .second; void *allocation = nullptr; size_t allocationBytes = 0; diff --git a/stdlib/public/Concurrency/TaskGroup.cpp b/stdlib/public/Concurrency/TaskGroup.cpp index e9cd764acbdd6..238217a17d620 100644 --- a/stdlib/public/Concurrency/TaskGroup.cpp +++ b/stdlib/public/Concurrency/TaskGroup.cpp @@ -575,6 +575,9 @@ static void fillGroupNextResult(TaskFutureWaitAsyncContext *context, case PollStatus::MustWait: assert(false && "filling a waiting status?"); return; + case PollStatus::Empty: + assert(false && "filling from empty status?"); + return; case PollStatus::Error: { context->fillWithError(reinterpret_cast(result.storage)); @@ -769,7 +772,7 @@ SWIFT_CC(swiftasync) static void workaround_function_swift_taskGroup_wait_next_t #endif // ============================================================================= -// ==== group.next() implementation (wait_next and groupPoll) ------------------ +// ==== group.next() implementation (wait_next) -------------------------------- SWIFT_CC(swiftasync) static void swift_taskGroup_wait_next_throwingImpl( diff --git a/stdlib/public/Concurrency/TaskGroup.swift b/stdlib/public/Concurrency/TaskGroup.swift index ae2a961922a69..0802ad8b2e59b 100644 --- a/stdlib/public/Concurrency/TaskGroup.swift +++ b/stdlib/public/Concurrency/TaskGroup.swift @@ -12,7 +12,6 @@ import Swift @_implementationOnly import _SwiftConcurrencyShims -import Darwin // ==== TaskGroup -------------------------------------------------------------- diff --git a/stdlib/public/Concurrency/TaskPool.cpp b/stdlib/public/Concurrency/TaskPool.cpp new file mode 100644 index 0000000000000..2ef26edcc904a --- /dev/null +++ b/stdlib/public/Concurrency/TaskPool.cpp @@ -0,0 +1,910 @@ +//===--- TaskPool.cpp - Task Pools --------------------------------------===// +// +// This source 
file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Object management for child tasks that are children of a task group. +// +//===----------------------------------------------------------------------===// + +#include "../CompatibilityOverride/CompatibilityOverride.h" + +#include "Debug.h" +#include "TaskPrivate.h" +#include "bitset" +#include "string" +#include "swift/ABI/HeapObject.h" +#include "swift/ABI/Metadata.h" +#include "swift/ABI/Task.h" +#include "swift/ABI/TaskPool.h" +#include "swift/Basic/RelativePointer.h" +#include "swift/Basic/STLExtras.h" +#include "swift/Runtime/Concurrency.h" +#include "swift/Runtime/Config.h" +#include "swift/Runtime/HeapObject.h" +#include "swift/Threading/Mutex.h" +#include +#include + +#if !SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY +#include +#endif + +#include +#if SWIFT_CONCURRENCY_ENABLE_DISPATCH +#include +#endif + +#if !defined(_WIN32) && !defined(__wasi__) && __has_include() +#include +#endif + +using namespace swift; + +/******************************************************************************/ +/*************************** TASK POOL ***************************************/ +/******************************************************************************/ + +using FutureFragment = AsyncTask::FutureFragment; + +namespace { +class TaskStatusRecord; + +class TaskPoolImpl: public TaskPoolTaskStatusRecord { +public: + /// Describes the status of the group. + enum class ReadyStatus : uintptr_t { + /// The task group is empty, no tasks are pending. + /// Return immediately, there is no point in suspending. + /// + /// The storage is not accessible. + Empty = 0b00, + + // not used: 0b01; same value as the PollStatus MustWait, + // which does not make sense for the ReadyStatus + + /// The future has completed with result (of type \c resultType). + Success = 0b10, + + /// The future has completed by throwing an error (an \c Error + /// existential). + Error = 0b11, + }; + + enum class PollStatus : uintptr_t { + /// The group is known to be empty and we can immediately return nil. + Empty = 0b00, + + /// The task has been enqueued to the groups wait queue. + MustWait = 0b01, + + /// The task has completed with result (of type \c resultType). + Success = 0b10, + + /// The task has completed by throwing an error (an \c Error existential). + Error = 0b11, + }; + + /// The result of waiting on the TaskPoolImpl. + struct PollResult { + PollStatus status; // TODO: pack it into storage pointer or not worth it? + + static PollResult get(AsyncTask *asyncTask, bool hadErrorResult) { + // A TaskPool task is always Void, so we don't even have to collect the result from its future fragment. + return PollResult{ + /*status*/ hadErrorResult ? + PollStatus::Error : + PollStatus::Success + }; + } + }; + + /// An item within the pending queue. 
+ struct PendingQueueItem { + AsyncTask * const storage; + + AsyncTask *getTask() const { + return storage; + } + + static PendingQueueItem get(AsyncTask *task) { + assert(task == nullptr || task->isFuture()); + return PendingQueueItem{task}; + } + }; + + struct PoolStatus { + static const uint64_t cancelled = 0b1000000000000000000000000000000000000000000000000000000000000000; + static const uint64_t waiting = 0b0100000000000000000000000000000000000000000000000000000000000000; + + // 62 bits for pending tasks counter + static const uint64_t maskPending = 0b0011111111111111111111111111111111111111111111111111111111111111; + static const uint64_t onePendingTask = 0b0000000000000000000000000000000000000000000000000000000000000001; + + uint64_t status; + + bool isCancelled() { + return (status & cancelled) > 0; + } + + bool hasWaitingTask() { + return (status & waiting) > 0; + } + + unsigned int pendingTasks() { + return (status & maskPending); + } + + bool isEmpty() { + return pendingTasks() == 0; + } + + /// Status value decrementing the Ready, Pending and Waiting counters by one. + PoolStatus completingPendingWaiting() { + assert(pendingTasks() && + "can only complete waiting task when pending tasks available"); + assert(hasWaitingTask() && + "can only complete waiting task when waiting task available"); + return PoolStatus{status - waiting - onePendingTask}; + } + + PoolStatus completingWaiting() { + assert(hasWaitingTask() && + "must have waiting task to complete it"); + return PoolStatus{status - waiting}; + } + + /// Pretty prints the status, as follows: + /// PoolStatus{ P:{pending tasks} W:{waiting tasks} {binary repr} } + std::string to_string() { + std::string str; + str.append("PoolStatus{ "); + str.append("C:"); // cancelled + str.append(isCancelled() ? "y " : "n "); + str.append("W:"); // has waiting task + str.append(hasWaitingTask() ? "y " : "n "); + str.append(" P:"); // pending + str.append(std::to_string(pendingTasks())); + str.append(" " + std::bitset<64>(status).to_string()); + str.append(" }"); + return str; + } + + /// Initially there are no waiting and no pending tasks. + static const PoolStatus initial() { + return PoolStatus{0}; + }; + }; + +private: +#if SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY || SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL + // Synchronization is simple here. In a single threaded mode, all swift tasks + // run on a single thread so no coordination is needed. In a task-to-thread + // model, only the parent task which created the task group can + // + // (a) add child tasks to a group + // (b) run the child tasks + // + // So we shouldn't need to worry about coordinating between child tasks and + // parents in a task group + void lock() const {} + void unlock() const {} +#else + // TODO: move to lockless via the status atomic (make readyQueue an mpsc_queue_t) + mutable std::mutex mutex_; + + void lock() const { mutex_.lock(); } + void unlock() const { mutex_.unlock(); } +#endif + + /// Used for queue management, counting number of waiting and ready tasks + std::atomic status; + + /// The task currently waiting on `group.next()`. Since only the owning + /// task can ever be waiting on a group, this is just either a reference + /// to that task or null. 
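As a reading aid for the packed status word laid out above, the following is a minimal, non-atomic Swift sketch of the same 64-bit layout (bit 63 = cancelled, bit 62 = waiting, bits 0-61 = pending-task counter). It is an illustration of the bookkeeping only, with invented names; the runtime manipulates this word with C++ atomics.

// Simplified, non-atomic Swift analogue of TaskPoolImpl::PoolStatus.
struct PoolStatusSketch {
    static let cancelledBit: UInt64 = 1 << 63
    static let waitingBit:   UInt64 = 1 << 62
    static let pendingMask:  UInt64 = (1 << 62) - 1

    var bits: UInt64 = 0

    var isCancelled: Bool    { bits & Self.cancelledBit != 0 }
    var hasWaitingTask: Bool { bits & Self.waitingBit != 0 }
    var pendingTasks: UInt64 { bits & Self.pendingMask }
    var isEmpty: Bool        { pendingTasks == 0 }

    mutating func cancel()        { bits |= Self.cancelledBit }
    mutating func markWaiting()   { bits |= Self.waitingBit }
    mutating func removeWaiting() { bits &= ~Self.waitingBit }
    mutating func addPending()    { bits += 1 }  // the counter lives in the low bits
    mutating func removePending() {
        precondition(pendingTasks > 0, "no pending task to remove")
        bits -= 1
    }
}

var status = PoolStatusSketch()
status.addPending()     // a child task was spawned into the pool
status.markWaiting()    // the owning task entered waitAll()
status.removePending()  // the child completed; isEmpty is now true, so the waiter resumes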
+ std::atomic waitQueue; + + + friend class ::swift::AsyncTask; + +public: + const Metadata *voidType; + + explicit TaskPoolImpl(const Metadata *T) + : TaskPoolTaskStatusRecord(), + status(PoolStatus::initial().status), + waitQueue(nullptr), + voidType(T) + {} + + TaskPoolTaskStatusRecord *getTaskRecord() { + return reinterpret_cast(this); + } + + /// Destroy the storage associated with the group. + void destroy(); + + bool isEmpty() { + auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; + return oldStatus.pendingTasks() == 0; + } + + bool isCancelled() { + auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; + return oldStatus.isCancelled(); + } + + /// Cancel the task group and all tasks within it. + /// + /// Returns `true` if this is the first time cancelling the group, false otherwise. + bool cancelAll(); + + PoolStatus statusCancel() { + auto old = status.fetch_or(PoolStatus::cancelled, + std::memory_order_relaxed); + return PoolStatus{old}; + } + + /// Returns *assumed* new status, including the just performed +1. + PoolStatus statusMarkWaitingAssumeAcquire() { + auto old = status.fetch_or(PoolStatus::waiting, std::memory_order_acquire); + return PoolStatus{old | PoolStatus::waiting}; + } + + PoolStatus statusRemoveWaiting() { + auto old = status.fetch_and(~PoolStatus::waiting, + std::memory_order_release); + return PoolStatus{old}; + } + + /// Decrement the pending task count. + /// Returns *assumed* new status, including the just performed +1. + PoolStatus statusDecrementPendingAssumeAcquire() { + auto old = status.fetch_sub(PoolStatus::onePendingTask, + std::memory_order_acquire); + assert(PoolStatus{old}.pendingTasks() > 0 && "attempted to decrement pending count when it was 0 already"); + return PoolStatus{old - PoolStatus::onePendingTask}; + } + + /// Increment the pending task count. + /// + /// Returns *assumed* new status, including the just performed -1. + PoolStatus statusIncrementPendingAssumeAcquire() { + auto old = status.fetch_add(PoolStatus::onePendingTask, + std::memory_order_acquire); + return PoolStatus{old + PoolStatus::onePendingTask}; + } + + /// Similar to decrementing the pending count, however does so 'relaxed'. + /// Used to undo an optimistic increment, when the pool already is cancelled. + /// + /// Returns *assumed* new status, including the just performed -1. + PoolStatus statusUndoIncrementPendingAssumeRelaxed() { + auto o = status.fetch_sub(PoolStatus::onePendingTask, + std::memory_order_relaxed); + return PoolStatus{o - PoolStatus::onePendingTask}; + } + + /// Add a single pending task to the status counter. + /// This is used to implement next() properly, as we need to know if there + /// are pending tasks worth suspending/waiting for or not. + /// + /// Note that the group does *not* store child tasks at all, as they are + /// stored in the `TaskPoolTaskStatusRecord` inside the current task, that + /// is currently executing the group. Here we only need the counts of + /// pending/ready tasks. + /// + /// If the `unconditionally` parameter is `true` the operation always successfully + /// adds a pending task, even if the group is cancelled. If the unconditionally + /// flag is `false`, the added pending count will be *reverted* before returning. + /// This is because we will NOT add a task to a cancelled group, unless doing + /// so unconditionally. + /// + /// Returns *assumed* new status, including the just performed +1. 
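A rough Swift analogue of the "optimistic increment, revert when cancelled" rule documented here may be useful; `PoolCounters` and `addPendingTask` are invented names, and the real implementation performs these steps with relaxed/acquire atomics on the packed status word.

// Sketch only: models the conditional add-pending rule, not the atomic implementation.
struct PoolCounters {
    var pending = 0
    var isCancelled = false
}

/// Returns whether the pool is still accepting work (i.e. it is not cancelled).
func addPendingTask(_ pool: inout PoolCounters, unconditionally: Bool) -> Bool {
    pool.pending += 1                   // optimistic +1
    if !unconditionally && pool.isCancelled {
        pool.pending -= 1               // revert: a cancelled pool rejects new work
    }
    return !pool.isCancelled
}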
+ PoolStatus statusAddPendingTaskRelaxed(bool unconditionally) { + auto assumed = statusIncrementPendingAssumeAcquire(); + + if (!unconditionally && assumed.isCancelled()) { + // revert that add, it was meaningless + return statusUndoIncrementPendingAssumeRelaxed(); + } + fprintf(stderr, "[%s:%d](%s) status after add: %s\n", __FILE_NAME__, __LINE__, __FUNCTION__, assumed.to_string().c_str()); + return assumed; + } + + PoolStatus statusLoadRelaxed() { + return PoolStatus{status.load(std::memory_order_relaxed)}; + } + +// /// Compare-and-set old status to a status derived from the old one, +// /// by simultaneously decrementing one Pending and one Waiting tasks. +// /// +// /// This is used to atomically perform a waiting task completion. +// bool statusCompletePendingReadyWaiting(PoolStatus &old) { +// return status.compare_exchange_strong( +// old.status, old.completingPendingReadyWaiting().status, +// /*success*/ std::memory_order_relaxed, +// /*failure*/ std::memory_order_relaxed); +// } +// +// bool statusCompletePendingReady(PoolStatus &old) { +// return status.compare_exchange_strong( +// old.status, old.completingPendingReady().status, +// /*success*/ std::memory_order_relaxed, +// /*failure*/ std::memory_order_relaxed); +// } + + + /// Offer result of a task into this task pool. + /// + /// Unlike a task group, result values are never stored and we immediately + /// release the task after decrementing the `pending` count in the pool's status. + /// + /// If the TaskPool is currently "draining" tasks (i.e. its body has completed), + /// there may be a `waiting` task. If so, and this is the last pending task, + /// this offer will resume it, allowing the TaskPool to complete and destroy itself. + void offer(AsyncTask *completed, AsyncContext *context); + + /// A `TaskPool` is not able to wait on individual completions, + /// instead, it can only await on "all pending tasks have been processed". + /// + /// + /// If unable to complete the waiting task immediately (with an readily + /// available completed task), either returns an `PollStatus::Empty` + /// result if it is known that no pending tasks in the group, + /// or a `PollStatus::MustWait` result if there are tasks in flight + /// and the waitingTask eventually be woken up by a completion. 
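To make the offer()/waitAll() contract just described easier to follow, here is a minimal single-threaded Swift model of it: each completion only decrements the pending count (no result is ever stored), and the sole waiter is resumed when the last pending task finishes. All names are invented for the illustration; the runtime does this with atomics and the wait-queue CAS shown below.

// Toy model of the pool's drain protocol; deliberately non-atomic.
struct PoolModel {
    var pending = 0
    var hasWaiter = false
}

enum WaitAllOutcome { case empty, mustWait }
enum OfferOutcome  { case done, resumeWaiter }

func waitAll(_ pool: inout PoolModel) -> WaitAllOutcome {
    if pool.pending == 0 { return .empty }  // nothing in flight, return immediately
    pool.hasWaiter = true                   // park the owning task until drained
    return .mustWait
}

func offer(_ pool: inout PoolModel) -> OfferOutcome {
    precondition(pool.pending > 0, "offered to pool with no pending tasks")
    pool.pending -= 1                       // the child result is Void, nothing is stored
    if pool.hasWaiter && pool.pending == 0 {
        pool.hasWaiter = false
        return .resumeWaiter                // last child finished: wake the waitAll() caller
    }
    return .done
}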
+ PollResult waitAll(AsyncTask *waitingTask); + +//private: +// /// Enqueue the completed task onto ready queue if there are no waiting tasks yet +// PoolStatus completeTask(AsyncTask *completedTask); +}; + +} // end anonymous namespace + +/******************************************************************************/ +/************************ TASK POOL IMPLEMENTATION ***************************/ +/******************************************************************************/ + +using ReadyStatus = TaskPoolImpl::ReadyStatus; +using PollResult = TaskPoolImpl::PollResult; +using PollStatus = TaskPoolImpl::PollStatus; + +static_assert(sizeof(TaskPoolImpl) <= sizeof(TaskPool) && + alignof(TaskPoolImpl) <= alignof(TaskPool), + "TaskPoolImpl doesn't fit in TaskPool"); + +static TaskPoolImpl *asImpl(TaskPool *group) { + return reinterpret_cast(group); +} + +static TaskPool *asAbstract(TaskPoolImpl *group) { + return reinterpret_cast(group); +} + +TaskPoolTaskStatusRecord * TaskPool::getTaskRecord() { + return asImpl(this)->getTaskRecord(); +} + +// ============================================================================= +// ==== initialize ------------------------------------------------------------- + +// Initializes into the preallocated _pool an actual TaskPoolImpl. +SWIFT_CC(swift) +static void swift_taskPool_initializeImpl(TaskPool *pool, const Metadata *Void) { + SWIFT_TASK_DEBUG_LOG("creating task pool = %p", pool); + + TaskPoolImpl *impl = ::new (pool) TaskPoolImpl(Void); + auto record = impl->getTaskRecord(); + assert(impl == record && "the pool IS the task record"); + + // ok, now that the group actually is initialized: attach it to the task + addStatusRecord(record, [&](ActiveTaskStatus parentStatus) { + // If the task has already been cancelled, reflect that immediately in + // the group's status. + if (parentStatus.isCancelled()) { + impl->statusCancel(); + } + return true; + }); +} + +// ============================================================================= +// ==== child task management -------------------------------------------------- + +void TaskPool::addChildTask(AsyncTask *child) { + SWIFT_TASK_DEBUG_LOG("attach child task = %p to pool = %p", child, this); + + // Add the child task to this task group. The corresponding removal + // won't happen until the parent task successfully polls for this child + // task, either synchronously in poll (if a task is available + // synchronously) or asynchronously in offer (otherwise). In either + // case, the work ends up being non-concurrent with the parent task. + + // The task status record lock is held during this operation, which + // prevents us from racing with cancellation or escalation. We don't + // need to acquire the task group lock because the child list is only + // accessed under the task status record lock. + auto record = asImpl(this)->getTaskRecord(); + record->attachChild(child); +} + +void TaskPool::removeChildTask(AsyncTask *child) { + SWIFT_TASK_DEBUG_LOG("detach child task = %p from group = %p", child, this); + + auto record = asImpl(this)->getTaskRecord(); + + // The task status record lock is held during this operation, which + // prevents us from racing with cancellation or escalation. We don't + // need to acquire the task group lock because the child list is only + // accessed under the task status record lock. 
+ record->detachChild(child); +} + +// ============================================================================= +// ==== destroy ---------------------------------------------------------------- +SWIFT_CC(swift) +static void swift_taskPool_destroyImpl(TaskPool *group) { + asImpl(group)->destroy(); +} + +void TaskPoolImpl::destroy() { + SWIFT_TASK_DEBUG_LOG("destroying task group = %p", this); + if (!this->isEmpty()) { + auto status = this->statusLoadRelaxed(); + SWIFT_TASK_DEBUG_LOG("destroying task group = %p, .pending = %d", + this, status.pendingTasks()); + } + assert(this->isEmpty() && "Attempted to destroy non-empty task group!"); + + // First, remove the group from the task and deallocate the record + removeStatusRecord(getTaskRecord()); + + // No need to drain our queue here, as by the time we call destroy, + // all tasks inside the group must have been awaited on already. + // This is done in Swift's withTaskPool function explicitly. + + // destroy the group's storage + this->~TaskPoolImpl(); +} + +// ============================================================================= +// ==== offer ------------------------------------------------------------------ + +void TaskPool::offer(AsyncTask *completedTask, AsyncContext *context) { + asImpl(this)->offer(completedTask, context); +} + +bool TaskPool::isCancelled() { + return asImpl(this)->isCancelled(); +} + +//static void fillPoolNextResult(TaskFutureWaitAsyncContext *context, +// PollResult result) { +// /// Fill in the result value +// switch (result.status) { +// case PollStatus::MustWait: +// assert(false && "filling a waiting status?"); +// return; +// +// case PollStatus::Error: { +// context->fillWithError(reinterpret_cast(result.storage)); +// return; +// } +// +// case PollStatus::Success: { +// // Initialize the result as an Optional. +// const Metadata *voidType = result.voidType; +// OpaqueValue *destPtr = context->successResultPointer; +// voidType->vw_storeEnumTagSinglePayload(destPtr, 1, 1); +// return; +// } +// } +//} + +static void fillPoolNextVoidResult(TaskFutureWaitAsyncContext *context, + const Metadata *voidType, + PollResult result) { + fprintf(stderr, "[%s:%d](%s) fill in void\n", __FILE_NAME__, __LINE__, __FUNCTION__); + /// Fill in the result value + switch (result.status) { + case PollStatus::MustWait: + assert(false && "filling a waiting status?"); + return; + + case PollStatus::Error: { + assert(false && "cannot have errors"); + return; + } + + case PollStatus::Success: { + // Initialize the result as an Optional. + // const Metadata *voidType = nullptr; // result.voidType; // FIXME: should be Void type + OpaqueValue *destPtr = context->successResultPointer; + // TODO: figure out a way to try to optimistically take the + // value out of the finished task's future, if there are no + // remaining references to it. + voidType->vw_initializeWithCopy(destPtr, nullptr); + voidType->vw_storeEnumTagSinglePayload(destPtr, 0, 1); + return; + } + + case PollStatus::Empty: { + // Initialize the result as a nil Optional. + // const Metadata *voidType = nullptr; // result.voidType; // FIXME: should be Void type + OpaqueValue *destPtr = context->successResultPointer; + voidType->vw_storeEnumTagSinglePayload(destPtr, 1, 1); + return; + } + } +} + +//// TaskPool is locked upon entry and exit +//TaskPoolImpl::PoolStatus TaskPoolImpl::completeTask(AsyncTask *completedTask) { +// SWIFT_TASK_DEBUG_LOG("pool does not retain tasks for their results; we're done here = %p", completedTask); +// // DO NOT RETAIN THE TASK. 
+// // We know it is Void, so we don't need to store the result; +// // By releasing tasks eagerly we're able to keep "infinite" task groups, +// // running, that never consume their values. Even more-so, +// +// return this->statusDecrementPendingAssumeAcquire(); +//} + +void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { + assert(completedTask); + assert(completedTask->isFuture()); + assert(completedTask->hasChildFragment()); + assert(completedTask->hasPoolChildFragment()); + assert(completedTask->poolChildFragment()->getPool() == asAbstract(this)); + SWIFT_TASK_DEBUG_LOG("offer task %p to pool %p", completedTask, this); + + lock(); // TODO: remove fragment lock, and use status for synchronization + + // Immediately decrement the pending count; we do not keep track of "ready" tasks and never store them; + // This is different from a task group, which has to keep the pending count and add +1 "ready" when offered to. + auto assumed = statusDecrementPendingAssumeAcquire(); + SWIFT_TASK_DEBUG_LOG("pool %p, remaining pending: %d", this, assumed.pendingTasks()); + + auto asyncContextPrefix = reinterpret_cast( + reinterpret_cast(context) - sizeof(FutureAsyncContextPrefix)); + bool hadErrorResult = false; + auto errorObject = asyncContextPrefix->errorResult; + if (errorObject) { + // instead, we need to enqueue this result: + hadErrorResult = true; + } + + SWIFT_TASK_DEBUG_LOG("pool(%p) child task=%p completed, detach", this, completedTask); + _swift_taskPool_detachChild(asAbstract(this), completedTask); + + // ==== a) has waiting task. + // A TaskPool only has a waiting task while terminating, and that task shall only be resumed once + // all tasks have been processed. Only resume the waiting task if this was the last pending task. + if (assumed.hasWaitingTask() && assumed.pendingTasks() == 0) { + auto waitingTask = waitQueue.load(std::memory_order_acquire); + SWIFT_TASK_DEBUG_LOG("group has waiting task = %p, complete with = %p", + waitingTask, completedTask); + while (true) { + // ==== a) run waiting task directly ------------------------------------- + assert(assumed.hasWaitingTask()); + assert(assumed.pendingTasks() && "offered to pool with no pending tasks!"); + // We are the "first" completed task to arrive, + // and since there is a task waiting we immediately claim and complete it. + if (waitQueue.compare_exchange_strong( + waitingTask, nullptr, + /*success*/ std::memory_order_release, + /*failure*/ std::memory_order_acquire)) { + +#if SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL + // In the task-to-thread model, child tasks are always actually + // run synchronously on the parent task's thread. For task groups + // specifically, this means that poll() will pick a child task + // that was added to the group and run it to completion as a + // subroutine. Therefore, when we enter offer(), we know that + // the parent task is waiting and we can just return to it. + + // The task-to-thread logic in poll() currently expects the child + // task to enqueue itself instead of just filling in the result in + // the waiting task. This is a little wasteful; there's no reason + // we can't just have the parent task set itself up as a waiter. + // But since it's what we're doing, we basically take the same + // path as we would if there wasn't a waiter. +// completeTask(completedTask); + unlock(); // TODO: remove fragment lock, and use status for synchronization + return; + +#else /* SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL */ + // Run the task. 
+ auto result = PollResult::get(completedTask, hadErrorResult); + + unlock(); // TODO: remove fragment lock, and use status for synchronization + + // Remove the child from the task group's running tasks list. + // The parent task isn't currently running (we're about to wake + // it up), so we're still synchronous with it. We can safely + // acquire our parent's status record lock here (which would + // ordinarily run the risk of deadlock, since e.g. cancellation + // does a parent -> child traversal while recursively holding + // locks) because we know that the child task is completed and + // we can't be holding its locks ourselves. + _swift_taskPool_detachChild(asAbstract(this), completedTask); + + auto waitingContext = + static_cast( + waitingTask->ResumeContext); + + fillPoolNextVoidResult(waitingContext, voidType, result); + + _swift_tsan_acquire(static_cast(waitingTask)); + // TODO: allow the caller to suggest an executor + waitingTask->flagAsAndEnqueueOnExecutor(ExecutorRef::generic()); + + // completedTask will be released by the remainder of its + // completion function. + return; +#endif + } + } + llvm_unreachable("should have enqueued and returned."); + } else { + // ==== b) enqueue completion ------------------------------------------------ + // + // else, no-one was waiting (yet), so we have to instead enqueue to the message + // queue when a task polls during next() it will notice that we have a value + // ready for it, and will process it immediately without suspending. + assert(!waitQueue.load(std::memory_order_relaxed)); + + // completeTask(completedTask); + unlock(); // TODO: remove fragment lock, and use status for synchronization + } + + return; +} + +SWIFT_CC(swiftasync) +static void +TASK_POOL_wait_resume_adapter(SWIFT_ASYNC_CONTEXT AsyncContext *_context) { + + auto context = static_cast(_context); + auto resumeWithError = + reinterpret_cast(context->ResumeParent); + return resumeWithError(context->Parent, context->errorResult); +} + +#ifdef __ARM_ARCH_7K__ +__attribute__((noinline)) +SWIFT_CC(swiftasync) static void workaround_function_swift_taskPool_waitAllImpl( + OpaqueValue *result, SWIFT_ASYNC_CONTEXT AsyncContext *callerContext, + TaskPool *_pool, + ThrowingTaskFutureWaitContinuationFunction resumeFunction, + AsyncContext *callContext) { + // Make sure we don't eliminate calls to this function. + asm volatile("" // Do nothing. + : // Output list, empty. + : "r"(result), "r"(callerContext), "r"(_pool) // Input list. + : // Clobber list, empty. 
+ ); + return; +} +#endif + +// ============================================================================= +// ==== pool.waitAll() implementation ------------------------------------------ + +SWIFT_CC(swiftasync) +static void swift_taskPool_waitAllImpl( + OpaqueValue *resultPointer, SWIFT_ASYNC_CONTEXT AsyncContext *callerContext, + TaskPool *_pool, + ThrowingTaskFutureWaitContinuationFunction *resumeFunction, + AsyncContext *rawContext) { + fprintf(stderr, "[%s:%d](%s) wait all; pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, _pool); + auto waitingTask = swift_task_getCurrent(); + waitingTask->ResumeTask = TASK_POOL_wait_resume_adapter; + waitingTask->ResumeContext = rawContext; + + auto context = static_cast(rawContext); + context->ResumeParent = + reinterpret_cast(resumeFunction); + context->Parent = callerContext; + context->errorResult = nullptr; + context->successResultPointer = resultPointer; + + auto pool = asImpl(_pool); + assert(pool && "swift_taskPool_waitAll was passed context without pool!"); + + PollResult polled = pool->waitAll(waitingTask); + switch (polled.status) { + case PollStatus::MustWait: + SWIFT_TASK_DEBUG_LOG("poll pool = %p, no ready tasks, waiting task = %p", + pool, waitingTask); + // The waiting task has been queued on the channel, + // there were pending tasks so it will be woken up eventually. +#ifdef __ARM_ARCH_7K__ + return workaround_function_swift_taskPool_waitAllImpl( + resultPointer, callerContext, _pool, resumeFunction, rawContext); +#else /* __ARM_ARCH_7K__ */ + return; +#endif /* __ARM_ARCH_7K__ */ + + case PollStatus::Empty: + case PollStatus::Error: + case PollStatus::Success: + SWIFT_TASK_DEBUG_LOG("[pool:%p] poll, task = %p", pool, waitingTask); +// if (pool->eagerlyReleaseCompleteTasks) { + fillPoolNextVoidResult(context, pool->voidType, polled); +// } else { +// fillPoolNextResult(context, polled); +// } +// if (auto completedTask = polled.retainedTask) { +// // Remove the child from the task pool's running tasks list. +// _swift_taskPool_detachChild(asAbstract(pool), completedTask); +// +// // Balance the retain done by completeTask. +// swift_release(completedTask); +// } + + return waitingTask->runInFullyEstablishedContext(); + } +} + +PollResult TaskPoolImpl::waitAll(AsyncTask *waitingTask) { + lock(); // TODO: remove pool lock, and use status for synchronization + SWIFT_TASK_DEBUG_LOG("[pool:%p], waitAll pending; status = %s", this, statusLoadRelaxed().to_string().c_str()); + + PollResult result; + + // Have we suspended the task? + bool hasSuspended = false; + bool haveRunOneChildTaskInline = false; + +reevaluate_if_taskpool_has_results:; + auto assumed = statusMarkWaitingAssumeAcquire(); + // ==== 1) bail out early if no tasks are pending ---------------------------- + if (assumed.isEmpty()) { + SWIFT_TASK_DEBUG_LOG("[pool:%p] poll, is empty, no pending tasks", this); + // No tasks in flight, we know no tasks were submitted before this poll + // was issued, and if we parked here we'd potentially never be woken up. + // Bail out and return `nil` from `group.next()`. 
+ statusRemoveWaiting(); + result.status = PollStatus::Empty; + // result.voidType = this->voidType; + unlock(); // TODO: remove pool lock, and use status for synchronization + return result; + } + + auto waitHead = waitQueue.load(std::memory_order_acquire); + + // ==== 3) Add to wait queue ------------------------------------------------- + _swift_tsan_release(static_cast(waitingTask)); + while (true) { + if (!hasSuspended) { + hasSuspended = true; + waitingTask->flagAsSuspended(); + } + // Put the waiting task at the beginning of the wait queue. + if (waitQueue.compare_exchange_strong( + waitHead, waitingTask, + /*success*/ std::memory_order_release, + /*failure*/ std::memory_order_acquire)) { + unlock(); // TODO: remove fragment lock, and use status for synchronization +#if SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL + // The logic here is paired with the logic in TaskPoolImpl::offer. Once + // we run the + auto oldTask = _swift_task_clearCurrent(); + assert(oldTask == waitingTask); + + auto childTask = getTaskRecord()->getFirstChild(); + assert(childTask != NULL); + + SWIFT_TASK_DEBUG_LOG("[RunInline] Switching away from running %p to now running %p", oldTask, childTask); + // Run the new task on the same thread now - this should run the new task to + // completion. All swift tasks in task-to-thread model run on generic + // executor + swift_job_run(childTask, ExecutorRef::generic()); + haveRunOneChildTaskInline = true; + + SWIFT_TASK_DEBUG_LOG("[RunInline] Switching back from running %p to now running %p", childTask, oldTask); + // We are back to being the parent task and now that we've run the child + // task, we should reevaluate parent task + _swift_task_setCurrent(oldTask); + goto reevaluate_if_taskpool_has_results; +#endif + // no ready tasks, so we must wait. + result.status = PollStatus::MustWait; + _swift_task_clearCurrent(); + return result; + } // else, try again + } +} + +// ============================================================================= +// ==== isEmpty ---------------------------------------------------------------- + +SWIFT_CC(swift) +static bool swift_taskPool_isEmptyImpl(TaskPool *pool) { + return asImpl(pool)->isEmpty(); +} + +// ============================================================================= +// ==== isCancelled ------------------------------------------------------------ + +SWIFT_CC(swift) +static bool swift_taskPool_isCancelledImpl(TaskPool *pool) { + return asImpl(pool)->isCancelled(); +} + +// ============================================================================= +// ==== cancelAll -------------------------------------------------------------- + +SWIFT_CC(swift) +static void swift_taskPool_cancelAllImpl(TaskPool *pool) { + asImpl(pool)->cancelAll(); +} + +bool TaskPoolImpl::cancelAll() { + SWIFT_TASK_DEBUG_LOG("cancel all tasks in pool = %p", this); + + // Flag the task group itself as cancelled. If this was already + // done, any existing child tasks should already have been cancelled, + // and cancellation should automatically flow to any new child tasks, + // so there's nothing else for us to do. + auto old = statusCancel(); + if (old.isCancelled()) { + return false; + } + + // Cancel all the child tasks. TaskPool is not a Sendable type, + // so cancelAll() can only be called from the owning task. This + // satisfies the precondition on cancelAllChildren(). 
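+  // From this point on the cancelled bit is visible to new work: `addTask` still
+  // creates its child (which is then cancelled immediately), while
+  // `addTaskUnlessCancelled` declines to create one and returns `false`.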
+ _swift_taskPool_cancelAllChildren(asAbstract(this)); + + return true; +} + +SWIFT_CC(swift) +static void swift_task_cancel_pool_child_tasksImpl(TaskPool *pool) { + // TaskPool is not a Sendable type, and so this operation (which is not + // currently exposed in the API) can only be called from the owning + // task. This satisfies the precondition on cancelAllChildren(). + _swift_taskPool_cancelAllChildren(pool); +} + +/// Cancel all the children of the given task group. +/// +/// The caller must guarantee that this is either called from the +/// owning task of the task group or while holding the owning task's +/// status record lock. +void swift::_swift_taskPool_cancelAllChildren(TaskPool *pool) { + SWIFT_TASK_DEBUG_LOG("pool(%p) cancel all children tasks", pool); + // Because only the owning task of the task group can modify the + // child list of a task group status record, and it can only do so + // while holding the owning task's status record lock, we do not need + // any additional synchronization within this function. + for (auto childTask: pool->getTaskRecord()->children()) + swift_task_cancel(childTask); +} + +// ============================================================================= +// ==== addPending ------------------------------------------------------------- + +SWIFT_CC(swift) +static bool swift_taskPool_addPendingImpl(TaskPool *pool, bool unconditionally) { + fprintf(stderr, "[%s:%d](%s) add pending task to pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, pool); + auto assumed = asImpl(pool)->statusAddPendingTaskRelaxed(unconditionally); + SWIFT_TASK_DEBUG_LOG("add pending %s to pool %p, tasks pending = %d", + unconditionally ? "unconditionally" : "", + pool, assumed.pendingTasks()); + return !assumed.isCancelled(); +} + +#define OVERRIDE_TASK_POOL COMPATIBILITY_OVERRIDE +#include COMPATIBILITY_OVERRIDE_INCLUDE_PATH diff --git a/stdlib/public/Concurrency/TaskPool.swift b/stdlib/public/Concurrency/TaskPool.swift new file mode 100644 index 0000000000000..ff8b0f86babee --- /dev/null +++ b/stdlib/public/Concurrency/TaskPool.swift @@ -0,0 +1,354 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + + +import Swift +@_implementationOnly import _SwiftConcurrencyShims +import Darwin + +// ==== TaskPool -------------------------------------------------------------- + +/// Starts a new scope that can contain a dynamic number of child tasks. +/// +/// A `TaskPool` is similar to a ``TaskGroup``, however its usage is fairly specialized for +/// submitting work using child tasks, where the results of those child tasks do not need to be collected. +/// +/// A task pool cannot be iterated over and its child tasks cannot be awaited on explicitly. +/// Task pool tasks are immediately removed from the pool as soon as they complete, +/// this is the primary difference from a task pool which stores results (and thus retains the results), +/// until they are consumed. +/// +/// Similarly to a `TaskGroup` a `TaskPool` awaits all tasks that are submitted to it before returning +/// from the `withTaskPool` call. 
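+///
+/// For example, a pool can be used for "fire and forget" style work where only
+/// completion matters. The following is a sketch; `urls` and `upload(_:)` are
+/// illustrative names and not part of this change:
+///
+///     await withTaskPool(returning: Void.self) { pool in
+///       for url in urls {
+///         pool.addTask {
+///           await upload(url)
+///         }
+///       }
+///       // Every child task has completed by the time withTaskPool returns.
+///     }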
+/// +/// Task Group Cancellation +/// ======================= +/// +/// You can cancel a task pool and all of its child tasks +/// by calling the `cancelAll()` method on the task pool, +/// or by canceling the task in which the pool is running. +/// +/// If you call `addTask(priority:operation:)` to create a new task in a canceled pool, +/// that task is immediately canceled after creation. +/// Alternatively, you can call `asyncUnlessCancelled(priority:operation:)`, +/// which doesn't create the task if the pool has already been canceled +/// Choosing between these two functions +/// lets you control how to react to cancellation within a pool: +/// some child tasks need to run regardless of cancellation, +/// but other tasks are better not even being created +/// when you know they can't produce useful results. +@available(SwiftStdlib 5.7, *) +@_unsafeInheritExecutor +@inlinable +public func withTaskPool( + returning returnType: PoolResult.Type = PoolResult.self, + body: (inout TaskPool) async throws -> PoolResult +) async rethrows -> PoolResult { + let _pool = Builtin.createTaskPool(Void.self) + var pool = TaskPool(pool: _pool) + + // Run the withTaskPool body. + do { + let result = try await body(&pool) + + await pool.awaitAllRemainingTasks() + Builtin.destroyTaskPool(_pool) + + return result + } catch { +// pool.cancelAll() + await pool.awaitAllRemainingTasks() + Builtin.destroyTaskPool(_pool) + + throw error + } +} + +/// A pool that contains dynamically created child tasks. +/// +/// To create a task pool, +/// call the `withTaskPool(returning:body:)` method. +/// +/// Don't use a task pool from outside the task where you created it. +/// In most cases, +/// the Swift type system prevents a task pool from escaping like that +/// because adding a child task to a task pool is a mutating operation, +/// and mutation operations can't be performed +/// from a concurrent execution context like a child task. +/// +/// For information about the language-level concurrency model that `TaskPool` is part of, +/// see [Concurrency][concurrency] in [The Swift Programming Language][tspl]. +/// +/// [concurrency]: https://docs.swift.org/swift-book/LanguageGuide/Concurrency.html +/// [tspl]: https://docs.swift.org/swift-book/ +/// +@available(SwiftStdlib 5.1, *) +@frozen +public struct TaskPool { + + @usableFromInline + internal let _pool: Builtin.RawPointer + + // No public initializers + @inlinable + init(pool: Builtin.RawPointer) { + self._pool = pool + } + +#if !SWIFT_STDLIB_TASK_TO_THREAD_MODEL_CONCURRENCY + /// Adds a child task to the pool. + /// + /// - Parameters: + /// - priority: The priority of the operation task. + /// Omit this parameter or pass `.unspecified` + /// to set the child task's priority to the priority of the pool. + /// - operation: The operation to execute as part of the task pool. + @_alwaysEmitIntoClient + public mutating func addTask( + priority: TaskPriority? = nil, + operation: __owned @Sendable @escaping () async -> Void + ) { +#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInGroup +#if SWIFT_STDLIB_TASK_TO_THREAD_MODEL_CONCURRENCY + let flags = taskCreateFlags( + priority: priority, isChildTask: true, copyTaskLocals: false, + inheritContext: false, enqueueJob: false, + addPendingGroupTaskUnconditionally: true + ) +#else + let flags = taskCreateFlags( + priority: priority, isChildTask: true, copyTaskLocals: false, + inheritContext: false, enqueueJob: true, + addPendingGroupTaskUnconditionally: true + ) +#endif + + // Create the task in this pool. 
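+    // Note: `addPendingGroupTaskUnconditionally: true` above means the pending
+    // count is bumped even if the pool was already cancelled; the child is still
+    // created and then cancelled right away, matching the documented `addTask`
+    // behavior.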
+ _ = Builtin.createAsyncTaskInPool(flags, _pool, operation) +#else + fatalError("Unsupported Swift compiler") +#endif + } + + /// Adds a child task to the pool, unless the pool has been canceled. + /// + /// - Parameters: + /// - overridingPriority: The priority of the operation task. + /// Omit this parameter or pass `.unspecified` + /// to set the child task's priority to the priority of the pool. + /// - operation: The operation to execute as part of the task pool. + /// - Returns: `true` if the child task was added to the pool; + /// otherwise `false`. + @_alwaysEmitIntoClient + public mutating func addTaskUnlessCancelled( + priority: TaskPriority? = nil, + operation: __owned @Sendable @escaping () async -> Void + ) -> Bool { +#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInGroup + let canAdd = _taskPoolAddPendingTask(pool: _pool, unconditionally: false) + + guard canAdd else { + // the pool is cancelled and is not accepting any new work + return false + } +#if SWIFT_STDLIB_TASK_TO_THREAD_MODEL_CONCURRENCY + let flags = taskCreateFlags( + priority: priority, isChildTask: true, copyTaskLocals: false, + inheritContext: false, enqueueJob: false, + addPendingGroupTaskUnconditionally: false + ) +#else + let flags = taskCreateFlags( + priority: priority, isChildTask: true, copyTaskLocals: false, + inheritContext: false, enqueueJob: true, + addPendingGroupTaskUnconditionally: false + ) +#endif + + // Create the task in this pool. + _ = Builtin.createAsyncTaskInGroup(flags, _pool, operation) + + return true +#else + fatalError("Unsupported Swift compiler") +#endif + } +#else + @available(SwiftStdlib 5.7, *) + @available(*, unavailable, message: "Unavailable in task-to-thread concurrency model", renamed: "addTask(operation:)") + public mutating func addTask( + priority: TaskPriority? = nil, + operation: __owned @Sendable @escaping () async -> Void + ) { + fatalError("Unavailable in task-to-thread concurrency model") + } + + /// Adds a child task to the pool. + /// + /// - Parameters: + /// - operation: The operation to execute as part of the task pool. + @_alwaysEmitIntoClient + public mutating func addTask( + operation: __owned @Sendable @escaping () async -> Void + ) { +#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInGroup + let flags = taskCreateFlags( + priority: nil, isChildTask: true, copyTaskLocals: false, + inheritContext: false, enqueueJob: true, + addPendingGroupTaskUnconditionally: true + ) + + // Create the task in this pool. + _ = Builtin.createAsyncTaskInGroup(flags, _pool, operation) +#else + fatalError("Unsupported Swift compiler") +#endif + } + + @available(SwiftStdlib 5.7, *) + @available(*, unavailable, message: "Unavailable in task-to-thread concurrency model", renamed: "addTaskUnlessCancelled(operation:)") + public mutating func addTaskUnlessCancelled( + priority: TaskPriority? = nil, + operation: __owned @Sendable @escaping () async -> Void + ) -> Bool { + fatalError("Unavailable in task-to-thread concurrency model") + } + + /// Adds a child task to the pool, unless the pool has been canceled. + /// + /// - Parameters: + /// - operation: The operation to execute as part of the task pool. + /// - Returns: `true` if the child task was added to the pool; + /// otherwise `false`. 
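+  ///
+  /// For example (a sketch; `work()` stands in for any async operation):
+  ///
+  ///     let started = pool.addTaskUnlessCancelled {
+  ///       await work()
+  ///     }
+  ///     if !started {
+  ///       // The pool was already cancelled, so no child task was created.
+  ///     }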
+ @_alwaysEmitIntoClient + public mutating func addTaskUnlessCancelled( + operation: __owned @Sendable @escaping () async -> Void + ) -> Bool { +#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInGroup + let canAdd = _taskPoolAddPendingTask(pool: _pool, unconditionally: false) + + guard canAdd else { + // the pool is cancelled and is not accepting any new work + return false + } + + let flags = taskCreateFlags( + priority: nil, isChildTask: true, copyTaskLocals: false, + inheritContext: false, enqueueJob: true, + addPendingGroupTaskUnconditionally: false + ) + + // Create the task in this pool. + _ = Builtin.createAsyncTaskInPool(flags, _pool, operation) + + return true +#else + fatalError("Unsupported Swift compiler") +#endif + } +#endif + + /// Await all of the remaining tasks on this pool. + @usableFromInline + internal mutating func awaitAllRemainingTasks() async { + while let _: Void = try? await _taskPoolWaitAll(pool: _pool) {} + } + + /// Wait for all of the pool's remaining tasks to complete. + @_alwaysEmitIntoClient + public mutating func waitForAll() async { + await awaitAllRemainingTasks() + } + + /// A Boolean value that indicates whether the pool has any remaining tasks. + /// + /// At the start of the body of a `withTaskPool(of:returning:body:)` call, + /// the task pool is always empty. + /// It`s guaranteed to be empty when returning from that body + /// because a task pool waits for all child tasks to complete before returning. + /// + /// - Returns: `true` if the pool has no pending tasks; otherwise `false`. + public var isEmpty: Bool { + _taskPoolIsEmpty(_pool) + } + + /// Cancel all of the remaining tasks in the pool. + /// + /// After cancellation, + /// any new results from the tasks in this pool + /// are silently discarded. + /// + /// If you add a task to a pool after canceling the pool, + /// that task is canceled immediately after being added to the pool. + /// + /// This method can only be called by the parent task that created the task + /// pool. + /// + /// - SeeAlso: `Task.isCancelled` + /// - SeeAlso: `TaskPool.isCancelled` + public func cancelAll() { + _taskPoolCancelAll(pool: _pool) + } + + /// A Boolean value that indicates whether the pool was canceled. + /// + /// To cancel a pool, call the `TaskPool.cancelAll()` method. + /// + /// If the task that's currently running this pool is canceled, + /// the pool is also implicitly canceled, + /// which is also reflected in this property's value. + public var isCancelled: Bool { + return _taskPoolIsCancelled(pool: _pool) + } +} + +@available(SwiftStdlib 5.7, *) +@available(*, unavailable) +extension TaskPool: Sendable { } + +/// ==== ----------------------------------------------------------------------- + +@available(SwiftStdlib 5.1, *) +@_silgen_name("swift_taskPool_destroy") +func _taskPoolDestroy(pool: __owned Builtin.RawPointer) + +@available(SwiftStdlib 5.1, *) +@_silgen_name("swift_taskPool_addPending") +@usableFromInline +func _taskPoolAddPendingTask( + pool: Builtin.RawPointer, + unconditionally: Bool +) -> Bool + +@available(SwiftStdlib 5.1, *) +@_silgen_name("swift_taskPool_cancelAll") +func _taskPoolCancelAll(pool: Builtin.RawPointer) + +/// Checks ONLY if the pool was specifically canceled. +/// The task itself being canceled must be checked separately. 
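+/// That is, this reads the pool's own cancellation flag (the bit set by
+/// `cancelAll()`); callers that also care about the surrounding task's
+/// cancellation should check `Task.isCancelled` as well.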
+@available(SwiftStdlib 5.1, *) +@_silgen_name("swift_taskPool_isCancelled") +func _taskPoolIsCancelled(pool: Builtin.RawPointer) -> Bool + +@available(SwiftStdlib 5.1, *) +@_silgen_name("swift_taskPool_waitAll") +func _taskPoolWaitAll(pool: Builtin.RawPointer) async throws -> T? + +@available(SwiftStdlib 5.1, *) +@_silgen_name("swift_task_hasTaskPoolStatusRecord") +func _taskHasTaskPoolStatusRecord() -> Bool + +@available(SwiftStdlib 5.1, *) +@_silgen_name("swift_taskPool_isEmpty") +func _taskPoolIsEmpty( + _ pool: Builtin.RawPointer +) -> Bool diff --git a/stdlib/public/Concurrency/TaskPrivate.h b/stdlib/public/Concurrency/TaskPrivate.h index 7de4046bb6bf0..5987d554ec2e3 100644 --- a/stdlib/public/Concurrency/TaskPrivate.h +++ b/stdlib/public/Concurrency/TaskPrivate.h @@ -38,7 +38,7 @@ namespace swift { // Set to 1 to enable helpful debug spew to stderr // If this is enabled, tests with `swift_task_debug_log` requirement can run. -#if 0 +#if 1 #define SWIFT_TASK_DEBUG_LOG(fmt, ...) \ fprintf(stderr, "[%#lx] [%s:%d](%s) " fmt "\n", \ (unsigned long)Thread::current().platformThreadId(), __FILE__, \ @@ -49,6 +49,7 @@ namespace swift { class AsyncTask; class TaskGroup; +class TaskPool; /// Allocate task-local memory on behalf of a specific task, /// not necessarily the current one. Generally this should only be @@ -95,6 +96,19 @@ void _swift_taskGroup_cancelAllChildren(TaskGroup *group); /// should generally use a higher-level function. void _swift_taskGroup_detachChild(TaskGroup *group, AsyncTask *child); +/// Cancel all the child tasks that belong to `pool`. +/// +/// The caller must guarantee that this is either called from the +/// owning task of the task pool or while holding the owning task's +/// status record lock. +void _swift_taskPool_cancelAllChildren(TaskPool *pool); + +/// Remove the given task from the given task pool. +/// +/// This is an internal API; clients outside of the TaskPool implementation +/// should generally use a higher-level function. +void _swift_taskPool_detachChild(TaskPool *pool, AsyncTask *child); + /// release() establishes a happens-before relation with a preceding acquire() /// on the same address. void _swift_tsan_acquire(void *addr); @@ -864,9 +878,10 @@ bool addStatusRecord(TaskStatusRecord *record, /// A helper function for updating a new child task that is created with /// information from the parent or the group that it was going to be added to. SWIFT_CC(swift) -void updateNewChildWithParentAndGroupState(AsyncTask *child, - ActiveTaskStatus parentStatus, - TaskGroup *group); +void updateNewChildWithParentAndContainerState(AsyncTask *child, + ActiveTaskStatus parentStatus, + TaskGroup *group, + TaskPool *pool); } // end namespace swift diff --git a/stdlib/public/Concurrency/TaskStatus.cpp b/stdlib/public/Concurrency/TaskStatus.cpp index 1846c71732161..d43720232bfe5 100644 --- a/stdlib/public/Concurrency/TaskStatus.cpp +++ b/stdlib/public/Concurrency/TaskStatus.cpp @@ -353,9 +353,10 @@ static bool swift_task_hasTaskGroupStatusRecordImpl() { /// parent's task status record lock. When called to link a child into a task /// group, this holds the parent's task status record lock. 
SWIFT_CC(swift) -void swift::updateNewChildWithParentAndGroupState(AsyncTask *child, - ActiveTaskStatus parentStatus, - TaskGroup *group) { +void swift::updateNewChildWithParentAndContainerState(AsyncTask *child, + ActiveTaskStatus parentStatus, + TaskGroup *group, + TaskPool *pool) { // We can take the fast path of just modifying the ActiveTaskStatus in the // child task since we know that it won't have any task status records and // cannot be accessed by anyone else since it hasn't been linked in yet. @@ -366,7 +367,9 @@ void swift::updateNewChildWithParentAndGroupState(AsyncTask *child, auto newChildTaskStatus = oldChildTaskStatus; - if (parentStatus.isCancelled() || (group && group->isCancelled())) { + if (parentStatus.isCancelled() || + (group && group->isCancelled()) || + (pool && pool->isCancelled())) { newChildTaskStatus = newChildTaskStatus.withCancelled(); } @@ -401,7 +404,34 @@ static void swift_taskGroup_attachChildImpl(TaskGroup *group, // task status record - see also asyncLet_addImpl. Since we attach a // child task to a TaskGroupRecord instead, we synchronize on the // parent's task status and then update the child. - updateNewChildWithParentAndGroupState(child, parentStatus, group); + updateNewChildWithParentAndContainerState(child, parentStatus, group, /*pool=*/nullptr); + }); +} + +SWIFT_CC(swift) +static void swift_taskPool_attachChildImpl(TaskPool *pool, + AsyncTask *child) { + + // We are always called from the context of the parent + // + // Acquire the status record lock of parent - we want to synchronize with + // concurrent cancellation or escalation as we're adding new tasks to the + // group. + auto parent = child->childFragment()->getParent(); + assert(parent == swift_task_getCurrent()); + + withStatusRecordLock(parent, LockContext::OnTask, [&](ActiveTaskStatus &parentStatus) { + pool->addChildTask(child); + + // After getting parent's status record lock, do some sanity checks to + // see if parent task or group has state changes that need to be + // propagated to the child. + // + // This is the same logic that we would do if we were adding a child + // task status record - see also asyncLet_addImpl. Since we attach a + // child task to a TaskGroupRecord instead, we synchronize on the + // parent's task status and then update the child. + updateNewChildWithParentAndContainerState(child, parentStatus, /*group=*/nullptr, pool); }); } @@ -417,6 +447,19 @@ void swift::_swift_taskGroup_detachChild(TaskGroup *group, }); } +// FIXME: this is not actually right; is it? are we guaranteeing locking right in a pool? +void swift::_swift_taskPool_detachChild(TaskPool *pool, + AsyncTask *child) { + // We are called synchronously from the perspective of the owning task. + // That doesn't necessarily mean the owning task *is* the current task, + // though, just that it's not concurrently running. + auto parent = child->childFragment()->getParent(); + + withStatusRecordLock(parent, LockContext::OnTask, [&](ActiveTaskStatus &parentStatus) { + pool->removeChildTask(child); + }); +} + /****************************** CANCELLATION ******************************/ /**************************************************************************/ @@ -444,6 +487,13 @@ static void performCancellationAction(TaskStatusRecord *record) { return; } + // Task pools need their children to be cancelled, the same way as groups. 
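+  // For a TaskPool record this means cancelling every child the pool currently
+  // tracks; priority escalation below handles TaskPool records analogously.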
+ case TaskStatusRecordKind::TaskPool: { + auto poolRecord = cast(record); + _swift_taskPool_cancelAllChildren(poolRecord->getPool()); + return; + } + // Cancellation notifications need to be called. case TaskStatusRecordKind::CancellationNotification: { auto notification = @@ -535,6 +585,12 @@ static void performEscalationAction(TaskStatusRecord *record, swift_task_escalate(child, newPriority); return; } + case TaskStatusRecordKind::TaskPool: { + auto childRecord = cast(record); + for (AsyncTask *child: childRecord->children()) + swift_task_escalate(child, newPriority); + return; + } // Cancellation notifications can be ignore. case TaskStatusRecordKind::CancellationNotification: diff --git a/stdlib/public/Concurrency/Tracing.h b/stdlib/public/Concurrency/Tracing.h index f2329471272ff..b3db88a9b60ab 100644 --- a/stdlib/public/Concurrency/Tracing.h +++ b/stdlib/public/Concurrency/Tracing.h @@ -27,6 +27,7 @@ class ExecutorRef; struct HeapObject; class Job; class TaskGroup; +class TaskPool; class TaskStatusRecord; namespace concurrency { @@ -57,9 +58,12 @@ void actor_note_job_queue(HeapObject *actor, Job *first, // Task trace calls. -void task_create(AsyncTask *task, AsyncTask *parent, TaskGroup *group, +// FIXME(pool): This is ABI so we need to evolve this by adding another overload +void task_create(AsyncTask *task, AsyncTask *parent, + TaskGroup *group, TaskPool *pool, AsyncLet *asyncLet, uint8_t jobPriority, bool isChildTask, - bool isFuture, bool isGroupChildTask, bool isAsyncLetTask); + bool isFuture, bool isGroupChildTask, bool isPoolChildTask, + bool isAsyncLetTask); void task_destroy(AsyncTask *task); diff --git a/stdlib/public/Concurrency/TracingSignpost.h b/stdlib/public/Concurrency/TracingSignpost.h index ab9d98e52d0db..0872f3e2513b7 100644 --- a/stdlib/public/Concurrency/TracingSignpost.h +++ b/stdlib/public/Concurrency/TracingSignpost.h @@ -176,9 +176,11 @@ inline void actor_note_job_queue(HeapObject *actor, Job *first, // Task trace calls. 
-inline void task_create(AsyncTask *task, AsyncTask *parent, TaskGroup *group, +inline void task_create(AsyncTask *task, AsyncTask *parent, + TaskGroup *group, TaskPool *pool, AsyncLet *asyncLet, uint8_t jobPriority, - bool isChildTask, bool isFuture, bool isGroupChildTask, + bool isChildTask, bool isFuture, + bool isGroupChildTask, bool isPoolChildTask, bool isAsyncLetTask) { ENSURE_LOGS(); auto id = os_signpost_id_make_with_pointer(TaskLog, task); @@ -187,11 +189,11 @@ inline void task_create(AsyncTask *task, AsyncTask *parent, TaskGroup *group, TaskLog, id, SWIFT_LOG_TASK_LIFETIME_NAME, "task=%" PRIx64 " resumefn=%p jobPriority=%u isChildTask=%{bool}d, isFuture=%{bool}d " - "isGroupChildTask=%{bool}d isAsyncLetTask=%{bool}d parent=%" PRIx64 - " group=%p asyncLet=%p", + "isGroupChildTask=%{bool}d isPoolChildTask=%{bool}d isAsyncLetTask=%{bool}d parent=%" PRIx64 + " group=%p pool=%p asyncLet=%p", task->getTaskId(), task->getResumeFunctionForLogging(), jobPriority, - isChildTask, isFuture, isGroupChildTask, isAsyncLetTask, parentID, group, - asyncLet); + isChildTask, isFuture, isGroupChildTask, isPoolChildTask, isAsyncLetTask, parentID, + group, pool, asyncLet); } inline void task_destroy(AsyncTask *task) { diff --git a/stdlib/public/Concurrency/TracingStubs.h b/stdlib/public/Concurrency/TracingStubs.h index 4ab35aeb1745f..158c2e167c77c 100644 --- a/stdlib/public/Concurrency/TracingStubs.h +++ b/stdlib/public/Concurrency/TracingStubs.h @@ -42,9 +42,11 @@ inline void actor_state_changed(HeapObject *actor, Job *firstJob, inline void actor_note_job_queue(HeapObject *actor, Job *first, Job *(*getNext)(Job *)) {} -inline void task_create(AsyncTask *task, AsyncTask *parent, TaskGroup *group, +inline void task_create(AsyncTask *task, AsyncTask *parent, + TaskGroup *group, TaskPool *pool, AsyncLet *asyncLet, uint8_t jobPriority, - bool isChildTask, bool isFuture, bool isGroupChildTask, + bool isChildTask, bool isFuture, + bool isGroupChildTask, bool isPoolChildTask, bool isAsyncLetTask) {} inline void task_destroy(AsyncTask *task) {} diff --git a/stdlib/toolchain/Compatibility56/CompatibilityOverrideConcurrency.def b/stdlib/toolchain/Compatibility56/CompatibilityOverrideConcurrency.def index 74952605ff37e..07d2be0a41110 100644 --- a/stdlib/toolchain/Compatibility56/CompatibilityOverrideConcurrency.def +++ b/stdlib/toolchain/Compatibility56/CompatibilityOverrideConcurrency.def @@ -44,6 +44,7 @@ # define OVERRIDE_TASK OVERRIDE # define OVERRIDE_ASYNC_LET OVERRIDE # define OVERRIDE_TASK_GROUP OVERRIDE +# define OVERRIDE_TASK_POOL OVERRIDE # define OVERRIDE_TASK_LOCAL OVERRIDE # define OVERRIDE_TASK_STATUS OVERRIDE #else @@ -59,6 +60,9 @@ # ifndef OVERRIDE_TASK_GROUP # define OVERRIDE_TASK_GROUP(...) # endif +# ifndef OVERRIDE_TASK_POOL +# define OVERRIDE_TASK_POOL(...) +# endif # ifndef OVERRIDE_TASK_LOCAL # define OVERRIDE_TASK_LOCAL(...) 
# endif @@ -277,6 +281,47 @@ OVERRIDE_TASK_GROUP(taskGroup_addPending, bool, swift::, (TaskGroup *group, bool unconditionally), (group, unconditionally)) +OVERRIDE_TASK_POOL(taskPool_initialize, void, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool *pool, const Metadata *Void), (pool, Void)) + +OVERRIDE_TASK_STATUS(taskPool_attachChild, void, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool *pool, AsyncTask *child), + (pool, child)) + +OVERRIDE_TASK_POOL(taskPool_destroy, void, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool *pool, const Metadata *T), (pool, T)) + +OVERRIDE_TASK_POOL(taskPool_wait_next_throwing, void, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swiftasync), + swift::, + (OpaqueValue *resultPointer, + SWIFT_ASYNC_CONTEXT AsyncContext *callerContext, + TaskPool *_pool, + ThrowingTaskFutureWaitContinuationFunction *resumeFn, + AsyncContext *callContext), + (resultPointer, callerContext, _pool, resumeFn, + callContext)) + +OVERRIDE_TASK_POOL(taskPool_isEmpty, bool, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool *pool, const Metadata *T), (pool, T)) + +OVERRIDE_TASK_POOL(taskPool_isCancelled, bool, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool *pool, const Metadata *T), (pool, T)) + +OVERRIDE_TASK_POOL(taskPool_cancelAll, void, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool *pool, const Metadata *T), (pool, T)) + +OVERRIDE_TASK_POOL(taskPool_addPending, bool, + SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), + swift::, (TaskPool *pool, bool unconditionally), + (pool, unconditionally)) + OVERRIDE_TASK_LOCAL(task_reportIllegalTaskLocalBindingWithinWithTaskGroup, void, SWIFT_EXPORT_FROM(swift_Concurrency), SWIFT_CC(swift), swift::, diff --git a/stdlib/toolchain/Compatibility56/include/Concurrency/Task.h b/stdlib/toolchain/Compatibility56/include/Concurrency/Task.h index e0d55eaee4865..37bcac3950252 100644 --- a/stdlib/toolchain/Compatibility56/include/Concurrency/Task.h +++ b/stdlib/toolchain/Compatibility56/include/Concurrency/Task.h @@ -37,6 +37,7 @@ struct SwiftError; class TaskStatusRecord; class TaskOptionRecord; class TaskGroup; +class TaskPool; extern FullMetadata jobHeapMetadata; @@ -435,6 +436,47 @@ class AsyncTask : public Job { return reinterpret_cast(offset); } + // ==== TaskPool Child ------------------------------------------------------ + + /// A child task created by `pool.add` is called a "task pool child." + /// Upon completion, in addition to the usual future notifying all its waiters, + /// it must also `pool->offer` itself to the pool. + /// + /// This signalling is necessary to correctly implement the pool's `next()`. + class PoolChildFragment { + private: + TaskPool* Pool; + + friend class AsyncTask; + friend class TaskPool; + + public: + explicit PoolChildFragment(TaskPool *pool) + : Pool(pool) {} + + /// Return the pool this task should offer into when it completes. + TaskPool* getPool() { + return Pool; + } + }; + + // Checks if task is a child of a TaskPool task. + // + // A child task that is a pool child knows that it's parent is a pool + // and therefore may `poolOffer` to it upon completion. 
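+  //
+  // Layout assumed by `poolChildFragment()` below: the pool-child fragment is
+  // placed directly after the AsyncTask header, behind the ChildFragment when
+  // one is present:
+  //
+  //   [ AsyncTask ][ ChildFragment (optional) ][ PoolChildFragment ][ ... ]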
+ bool hasPoolChildFragment() const { return Flags.task_isPoolChildTask(); } + + PoolChildFragment *poolChildFragment() { + assert(hasPoolChildFragment()); + + auto offset = reinterpret_cast(this); + offset += sizeof(AsyncTask); + if (hasChildFragment()) + offset += sizeof(ChildFragment); + + return reinterpret_cast(offset); + } + // ==== Future --------------------------------------------------------------- class FutureFragment { diff --git a/stdlib/toolchain/Compatibility56/include/Concurrency/TaskLocal.h b/stdlib/toolchain/Compatibility56/include/Concurrency/TaskLocal.h index 4610e2cf15217..8eadf8ff66deb 100644 --- a/stdlib/toolchain/Compatibility56/include/Concurrency/TaskLocal.h +++ b/stdlib/toolchain/Compatibility56/include/Concurrency/TaskLocal.h @@ -27,6 +27,7 @@ struct OpaqueValue; struct SwiftError; class TaskStatusRecord; class TaskGroup; +class TaskPool; // ==== Task Locals Values --------------------------------------------------- diff --git a/test/Concurrency/Runtime/async_taskpool_neverConsumingTasks.swift b/test/Concurrency/Runtime/async_taskpool_neverConsumingTasks.swift new file mode 100644 index 0000000000000..a05aff2b4b97f --- /dev/null +++ b/test/Concurrency/Runtime/async_taskpool_neverConsumingTasks.swift @@ -0,0 +1,75 @@ +// RUN: %target-run-simple-swift( -Xfrontend -disable-availability-checking -parse-as-library) | %FileCheck %s --dump-input=always +// REQUIRES: executable_test +// REQUIRES: concurrency +// REQUIRES: concurrency_runtime +// UNSUPPORTED: back_deployment_runtime +import Darwin + +actor Waiter { + let until: Int + var count: Int + + var cc: CheckedContinuation? + + init(until: Int) { + self.until = until + self.count = 0 + } + + func increment() { + self.count += 1 + fputs("> increment (\(self.count)/\(self.until))\n", stderr); + if self.until <= self.count { + if let cc = self.cc { + cc.resume(returning: self.count) + } + } + } + + func wait() async -> Int { + if self.until <= self.count { + fputs("> RETURN in Waiter\n", stderr); + return self.count + } + + return await withCheckedContinuation { cc in + fputs("> WAIT in Waiter\n", stderr); + self.cc = cc + } + } +} + +@available(SwiftStdlib 5.1, *) +func test_taskPool_neverConsume() async { + let until = 100 + let waiter = Waiter(until: until) + + let allTasks = await withTaskPool(returning: Int.self) { pool in + for n in 1...until { + fputs("> enqueue: \(n)\n", stderr); + pool.addTask { + fputs("> run: \(n) (cancelled: \(Task.isCancelled))\n", stderr); +// try? await Task.sleep(until: .now + .milliseconds(100), clock: .continuous) + await waiter.increment() + fputs("> done: \(n) (cancelled: \(Task.isCancelled))\n", stderr); + } + } + + // wait a little bit, so some tasks complete before we hit the implicit "wait at end of task group scope" + try? 
await Task.sleep(until: .now + .milliseconds(500), clock: .continuous) + + pool.cancelAll() + return until + } + + // CHECK: all tasks: 100 + print("all tasks: \(allTasks)") + print("actor: \(allTasks)") +} + +@available(SwiftStdlib 5.1, *) +@main struct Main { + static func main() async { + await test_taskPool_neverConsume() + } +} diff --git a/test/SILGen/async_builtins.swift b/test/SILGen/async_builtins.swift index 184001741658d..e76491ed82d81 100644 --- a/test/SILGen/async_builtins.swift +++ b/test/SILGen/async_builtins.swift @@ -32,6 +32,14 @@ public struct X { } } + // CHECK-LABEL: sil hidden [ossa] @$s4test1XV16launchPoolChild_5groupyx_BptlF : $@convention(method) (@in_guaranteed T, Builtin.RawPointer, X) -> () { + func launchPoolChild(_ value: T, pool: Builtin.RawPointer) { + // CHECK: builtin "createAsyncTaskInGroup"([[ZERO:%.*]] : $Int, [[POOL:%.*]] : $Builtin.RawPointer, [[FN:%.*]] : $@async @callee_guaranteed @substituted <τ_0_0> () -> (@out τ_0_0, @error any Error) for ) : $(Builtin.NativeObject, Builtin.RawPointer) + _ = Builtin.createAsyncTaskInPool(0, pool) { () async throws -> T in + return value + } + } + public func launchRocker(closure: @escaping () async throws -> T) { _ = Builtin.createAsyncTask(0, closure) } diff --git a/tools/swift-inspect/Sources/swift-inspect/Operations/DumpConcurrency.swift b/tools/swift-inspect/Sources/swift-inspect/Operations/DumpConcurrency.swift index 91035a2a9ce1e..5868947b231ea 100644 --- a/tools/swift-inspect/Sources/swift-inspect/Operations/DumpConcurrency.swift +++ b/tools/swift-inspect/Sources/swift-inspect/Operations/DumpConcurrency.swift @@ -46,6 +46,7 @@ fileprivate class ConcurrencyDumper { var isChildTask: Bool var isFuture: Bool var isGroupChildTask: Bool + // TODO: isPoolChildTask var isAsyncLetTask: Bool var maxPriority: UInt32 var isCancelled: Bool diff --git a/unittests/runtime/CompatibilityOverrideConcurrency.cpp b/unittests/runtime/CompatibilityOverrideConcurrency.cpp index 8af3425c458bb..8c81791ba5c44 100644 --- a/unittests/runtime/CompatibilityOverrideConcurrency.cpp +++ b/unittests/runtime/CompatibilityOverrideConcurrency.cpp @@ -181,6 +181,8 @@ TEST_F(CompatibilityOverrideConcurrencyTest, test_swift_asyncLet_end) { swift_asyncLet_end(nullptr); } +// TODO: handle TaskPool + TEST_F(CompatibilityOverrideConcurrencyTest, test_swift_taskGroup_initialize) { swift_taskGroup_initialize(nullptr, nullptr); } From 4dd1ffc1eb8b93d58f144e445cd407b99b3b484b Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Mon, 28 Nov 2022 11:45:17 +0900 Subject: [PATCH 4/8] renames and cleanups --- include/swift/ABI/MetadataValues.h | 10 +- include/swift/ABI/TaskPool.h | 14 +- stdlib/public/BackDeployConcurrency/Task.cpp | 2 +- .../public/BackDeployConcurrency/Task.swift | 12 +- .../BackDeployConcurrency/TaskGroup.swift | 8 +- .../public/BackDeployConcurrency/TaskPool.cpp | 667 +++++++++--------- .../public/BackDeployConcurrency/TaskPool.h | 16 +- .../BackDeployConcurrency/TaskSleep.swift | 2 +- stdlib/public/Concurrency/Task.cpp | 2 +- stdlib/public/Concurrency/Task.swift | 12 +- stdlib/public/Concurrency/TaskGroup.swift | 20 +- stdlib/public/Concurrency/TaskPool.cpp | 23 +- stdlib/public/Concurrency/TaskPool.swift | 51 +- stdlib/public/Concurrency/TaskSleep.swift | 2 +- .../Concurrency/TaskSleepDuration.swift | 2 +- .../async_taskpool_dontLeakTasks.swift | 49 ++ 16 files changed, 474 insertions(+), 418 deletions(-) create mode 100644 test/Concurrency/Runtime/async_taskpool_dontLeakTasks.swift diff --git 
a/include/swift/ABI/MetadataValues.h b/include/swift/ABI/MetadataValues.h index 6ec000c4b2c0f..c2bea23132a11 100644 --- a/include/swift/ABI/MetadataValues.h +++ b/include/swift/ABI/MetadataValues.h @@ -2265,7 +2265,7 @@ class TaskCreateFlags : public FlagSet { Task_CopyTaskLocals = 10, Task_InheritContext = 11, Task_EnqueueJob = 12, - Task_AddPendingGroupTaskUnconditionally = 13, + Task_AddPendingGroupTaskUnconditionally = 13, // used for: TaskGroup, TaskPool }; explicit constexpr TaskCreateFlags(size_t bits) : FlagSet(bits) {} @@ -2290,12 +2290,8 @@ class TaskCreateFlags : public FlagSet { enqueueJob, setEnqueueJob) FLAGSET_DEFINE_FLAG_ACCESSORS(Task_AddPendingGroupTaskUnconditionally, - addPendingGroupTaskUnconditionally, - setAddPendingGroupTaskUnconditionally) - // re-use the group flag for adding to a pool - FLAGSET_DEFINE_FLAG_ACCESSORS(Task_AddPendingGroupTaskUnconditionally, - addPendingPoolTaskUnconditionally, - setAddPendingPoolTaskUnconditionally) + addPendingTaskUnconditionally, + setaddPendingTaskUnconditionally) }; /// Flags for schedulable jobs. diff --git a/include/swift/ABI/TaskPool.h b/include/swift/ABI/TaskPool.h index d149c5eecc1fc..37aa0dc9a7c59 100644 --- a/include/swift/ABI/TaskPool.h +++ b/include/swift/ABI/TaskPool.h @@ -27,7 +27,7 @@ namespace swift { -/// The task group is responsible for maintaining dynamically created child tasks. +/// The task pool is responsible for maintaining dynamically created child tasks. class alignas(Alignment_TaskPool) TaskPool { public: // These constructors do not initialize the group instance, and the @@ -38,21 +38,21 @@ class alignas(Alignment_TaskPool) TaskPool { void *PrivateData[NumWords_TaskPool]; - /// Upon a future task's completion, offer it to the task group it belongs to. + /// Upon a future task's completion, offer it to the task pool it belongs to. void offer(AsyncTask *completed, AsyncContext *context); /// Checks the cancellation status of the group. bool isCancelled(); - // Add a child task to the task group. Always called while holding the - // status record lock of the task group's owning task. + // Add a child task to the task pool. Always called while holding the + // status record lock of the task pool's owning task. void addChildTask(AsyncTask *task); - // Remove a child task from the task group. Always called while holding - // the status record lock of the task group's owning task. + // Remove a child task from the task pool. Always called while holding + // the status record lock of the task pool's owning task. void removeChildTask(AsyncTask *task); - // Provide accessor for task group's status record + // Provide accessor for task pool's status record TaskPoolTaskStatusRecord *getTaskRecord(); }; diff --git a/stdlib/public/BackDeployConcurrency/Task.cpp b/stdlib/public/BackDeployConcurrency/Task.cpp index 0d433c4a14ae0..e5cf4c9996b37 100644 --- a/stdlib/public/BackDeployConcurrency/Task.cpp +++ b/stdlib/public/BackDeployConcurrency/Task.cpp @@ -508,7 +508,7 @@ static AsyncTaskAndContext swift_task_create_commonImpl( } // Add to the task group, if requested. 
- if (taskCreateFlags.addPendingGroupTaskUnconditionally()) { + if (taskCreateFlags.addPendingTaskUnconditionally()) { assert(group && "Missing group"); swift_taskGroup_addPending(group, /*unconditionally=*/true); } diff --git a/stdlib/public/BackDeployConcurrency/Task.swift b/stdlib/public/BackDeployConcurrency/Task.swift index fe421c65159f9..79a752eee95e6 100644 --- a/stdlib/public/BackDeployConcurrency/Task.swift +++ b/stdlib/public/BackDeployConcurrency/Task.swift @@ -424,7 +424,7 @@ struct JobFlags { func taskCreateFlags( priority: TaskPriority?, isChildTask: Bool, copyTaskLocals: Bool, inheritContext: Bool, enqueueJob: Bool, - addPendingGroupTaskUnconditionally: Bool + addPendingTaskUnconditionally: Bool ) -> Int { var bits = 0 bits |= (bits & ~0xFF) | Int(priority?.rawValue ?? 0) @@ -440,7 +440,7 @@ func taskCreateFlags( if enqueueJob { bits |= 1 << 12 } - if addPendingGroupTaskUnconditionally { + if addPendingTaskUnconditionally { bits |= 1 << 13 } return bits @@ -483,7 +483,7 @@ extension Task where Failure == Never { let flags = taskCreateFlags( priority: priority, isChildTask: false, copyTaskLocals: true, inheritContext: true, enqueueJob: true, - addPendingGroupTaskUnconditionally: false) + addPendingTaskUnconditionally: false) // Create the asynchronous task. let (task, _) = Builtin.createAsyncTask(flags, operation) @@ -531,7 +531,7 @@ extension Task where Failure == Error { let flags = taskCreateFlags( priority: priority, isChildTask: false, copyTaskLocals: true, inheritContext: true, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) // Create the asynchronous task future. @@ -578,7 +578,7 @@ extension Task where Failure == Never { let flags = taskCreateFlags( priority: priority, isChildTask: false, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false) + addPendingTaskUnconditionally: false) // Create the asynchronous task future. let (task, _) = Builtin.createAsyncTask(flags, operation) @@ -625,7 +625,7 @@ extension Task where Failure == Error { let flags = taskCreateFlags( priority: priority, isChildTask: false, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) // Create the asynchronous task future. diff --git a/stdlib/public/BackDeployConcurrency/TaskGroup.swift b/stdlib/public/BackDeployConcurrency/TaskGroup.swift index 6955c5ee52552..de7d2f0958019 100644 --- a/stdlib/public/BackDeployConcurrency/TaskGroup.swift +++ b/stdlib/public/BackDeployConcurrency/TaskGroup.swift @@ -241,7 +241,7 @@ public struct TaskGroup { let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: true + addPendingTaskUnconditionally: true ) // Create the task in this group. @@ -276,7 +276,7 @@ public struct TaskGroup { let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) // Create the task in this group. @@ -493,7 +493,7 @@ public struct ThrowingTaskGroup { let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: true + addPendingTaskUnconditionally: true ) // Create the task in this group. 
@@ -531,7 +531,7 @@ public struct ThrowingTaskGroup { let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) // Create the task in this group. diff --git a/stdlib/public/BackDeployConcurrency/TaskPool.cpp b/stdlib/public/BackDeployConcurrency/TaskPool.cpp index 644224c21de3e..6860e616ff1d0 100644 --- a/stdlib/public/BackDeployConcurrency/TaskPool.cpp +++ b/stdlib/public/BackDeployConcurrency/TaskPool.cpp @@ -48,6 +48,21 @@ using namespace swift; +#if !SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY +#include +#endif + +#include +#if SWIFT_CONCURRENCY_ENABLE_DISPATCH +#include +#endif + +#if !defined(_WIN32) && !defined(__wasi__) && __has_include() +#include +#endif + +using namespace swift; + /******************************************************************************/ /*************************** TASK POOL ***************************************/ /******************************************************************************/ @@ -55,169 +70,137 @@ using namespace swift; using FutureFragment = AsyncTask::FutureFragment; namespace { -class TaskStatusRecord; - -class TaskPoolImpl: public TaskPoolTaskStatusRecord { -public: - /// Describes the status of the group. - enum class ReadyStatus : uintptr_t { - /// The task group is empty, no tasks are pending. - /// Return immediately, there is no point in suspending. - /// - /// The storage is not accessible. - Empty = 0b00, - - // not used: 0b01; same value as the PollStatus MustWait, - // which does not make sense for the ReadyStatus - - /// The future has completed with result (of type \c resultType). - Success = 0b10, - - /// The future has completed by throwing an error (an \c Error - /// existential). - Error = 0b11, - }; - - enum class PollStatus : uintptr_t { - /// The group is known to be empty and we can immediately return nil. - Empty = 0b00, + class TaskStatusRecord; + + class TaskPoolImpl: public TaskPoolTaskStatusRecord { + public: + /// Describes the status of the group. + enum class ReadyStatus : uintptr_t { + /// The task group is empty, no tasks are pending. + /// Return immediately, there is no point in suspending. + /// + /// The storage is not accessible. + Empty = 0b00, + + // not used: 0b01; same value as the PollStatus MustWait, + // which does not make sense for the ReadyStatus + + /// The future has completed with result (of type \c resultType). + Success = 0b10, + + /// The future has completed by throwing an error (an \c Error + /// existential). + Error = 0b11, + }; - /// The task has been enqueued to the groups wait queue. - MustWait = 0b01, + enum class PollStatus : uintptr_t { + /// The group is known to be empty and we can immediately return nil. + Empty = 0b00, - /// The task has completed with result (of type \c resultType). - Success = 0b10, + /// The task has been enqueued to the groups wait queue. + MustWait = 0b01, - /// The task has completed by throwing an error (an \c Error existential). - Error = 0b11, - }; + /// The task has completed with result (of type \c resultType). + Success = 0b10, - /// The result of waiting on the TaskPoolImpl. - struct PollResult { - PollStatus status; // TODO: pack it into storage pointer or not worth it? - -// /// Storage for the result of the future. -// /// -// /// When the future completed normally, this is a pointer to the storage -// /// of the result value, which lives inside the future task itself. 
-// /// -// /// When the future completed by throwing an error, this is the error -// /// object itself. -// OpaqueValue *storage; -// -// const Metadata *voidType; - -// /// The completed task, if necessary to keep alive until consumed by next(). -// /// -// /// # Important: swift_release -// /// If if a task is returned here, the task MUST be swift_released -// /// once we are done with it, to balance out the retain made before -// /// when the task was enqueued into the ready queue to keep it alive -// /// until a next() call eventually picks it up. -// AsyncTask *retainedTask; - -// bool isStorageAccessible() { -// return status == PollStatus::Success || -// status == PollStatus::Error || -// status == PollStatus::Empty; -// } + /// The task has completed by throwing an error (an \c Error existential). + Error = 0b11, + }; - static PollResult get(AsyncTask *asyncTask, bool hadErrorResult) { - // A TaskPool task is always Void, so we don't even have to collect the result from its future fragment. - return PollResult{ - /*status*/ hadErrorResult ? - PollStatus::Error : - PollStatus::Success -// , -// /*storage*/ hadErrorResult ? -// reinterpret_cast(fragment->getError()) : -// fragment->getStoragePtr(), -// /*voidType*/fragment->getResultType(), -// /*task*/ asyncTask - }; - } - }; + /// The result of waiting on the TaskPoolImpl. + struct PollResult { + PollStatus status; // TODO: pack it into storage pointer or not worth it? + + static PollResult get(AsyncTask *asyncTask, bool hadErrorResult) { + // A TaskPool task is always Void, so we don't even have to collect the result from its future fragment. + return PollResult{ + /*status*/ hadErrorResult ? + PollStatus::Error : + PollStatus::Success + }; + } + }; - /// An item within the pending queue. - struct PendingQueueItem { - AsyncTask * const storage; + /// An item within the pending queue. 
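+    ///
+    /// Unlike a TaskGroup's ready queue item, a pool item carries no status
+    /// payload; it only wraps the (possibly null) task pointer, since completed
+    /// results are never stored.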
+ struct PendingQueueItem { + AsyncTask * const storage; - AsyncTask *getTask() const { - return storage; - } + AsyncTask *getTask() const { + return storage; + } - static PendingQueueItem get(AsyncTask *task) { - assert(task == nullptr || task->isFuture()); - return PendingQueueItem{task}; - } - }; + static PendingQueueItem get(AsyncTask *task) { + assert(task == nullptr || task->isFuture()); + return PendingQueueItem{task}; + } + }; - struct PoolStatus { - static const uint64_t cancelled = 0b1000000000000000000000000000000000000000000000000000000000000000; - static const uint64_t waiting = 0b0100000000000000000000000000000000000000000000000000000000000000; + struct PoolStatus { + static const uint64_t cancelled = 0b1000000000000000000000000000000000000000000000000000000000000000; + static const uint64_t waiting = 0b0100000000000000000000000000000000000000000000000000000000000000; - // 62 bits for pending tasks counter - static const uint64_t maskPending = 0b0011111111111111111111111111111111111111111111111111111111111111; - static const uint64_t onePendingTask = 0b0000000000000000000000000000000000000000000000000000000000000001; + // 62 bits for pending tasks counter + static const uint64_t maskPending = 0b0011111111111111111111111111111111111111111111111111111111111111; + static const uint64_t onePendingTask = 0b0000000000000000000000000000000000000000000000000000000000000001; - uint64_t status; + uint64_t status; - bool isCancelled() { - return (status & cancelled) > 0; - } + bool isCancelled() { + return (status & cancelled) > 0; + } - bool hasWaitingTask() { - return (status & waiting) > 0; - } + bool hasWaitingTask() { + return (status & waiting) > 0; + } - unsigned int pendingTasks() { - return (status & maskPending); - } + unsigned int pendingTasks() { + return (status & maskPending); + } - bool isEmpty() { - return pendingTasks() == 0; - } + bool isEmpty() { + return pendingTasks() == 0; + } - /// Status value decrementing the Ready, Pending and Waiting counters by one. - PoolStatus completingPendingWaiting() { - assert(pendingTasks() && - "can only complete waiting task when pending tasks available"); - assert(hasWaitingTask() && - "can only complete waiting task when waiting task available"); - return PoolStatus{status - waiting - onePendingTask}; - } + /// Status value decrementing the Ready, Pending and Waiting counters by one. + PoolStatus completingPendingWaiting() { + assert(pendingTasks() && + "can only complete waiting task when pending tasks available"); + assert(hasWaitingTask() && + "can only complete waiting task when waiting task available"); + return PoolStatus{status - waiting - onePendingTask}; + } - PoolStatus completingWaiting() { - assert(hasWaitingTask() && - "must have waiting task to complete it"); - return PoolStatus{status - waiting}; - } + PoolStatus completingWaiting() { + assert(hasWaitingTask() && + "must have waiting task to complete it"); + return PoolStatus{status - waiting}; + } - /// Pretty prints the status, as follows: - /// PoolStatus{ P:{pending tasks} W:{waiting tasks} {binary repr} } - std::string to_string() { - std::string str; - str.append("PoolStatus{ "); - str.append("C:"); // cancelled - str.append(isCancelled() ? "y " : "n "); - str.append("W:"); // has waiting task - str.append(hasWaitingTask() ? 
"y " : "n "); - str.append(" P:"); // pending - str.append(std::to_string(pendingTasks())); - str.append(" " + std::bitset<64>(status).to_string()); - str.append(" }"); - return str; - } + /// Pretty prints the status, as follows: + /// PoolStatus{ P:{pending tasks} W:{waiting tasks} {binary repr} } + std::string to_string() { + std::string str; + str.append("PoolStatus{ "); + str.append("C:"); // cancelled + str.append(isCancelled() ? "y " : "n "); + str.append("W:"); // has waiting task + str.append(hasWaitingTask() ? "y " : "n "); + str.append(" P:"); // pending + str.append(std::to_string(pendingTasks())); + str.append(" " + std::bitset<64>(status).to_string()); + str.append(" }"); + return str; + } - /// Initially there are no waiting and no pending tasks. - static const PoolStatus initial() { - return PoolStatus{0}; + /// Initially there are no waiting and no pending tasks. + static const PoolStatus initial() { + return PoolStatus{0}; + }; }; - }; -private: + private: #if SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY || SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL - // Synchronization is simple here. In a single threaded mode, all swift tasks + // Synchronization is simple here. In a single threaded mode, all swift tasks // run on a single thread so no coordination is needed. In a task-to-thread // model, only the parent task which created the task group can // @@ -229,129 +212,132 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { void lock() const {} void unlock() const {} #else - // TODO: move to lockless via the status atomic (make readyQueue an mpsc_queue_t) - mutable std::mutex mutex_; + // TODO: move to lockless via the status atomic (make readyQueue an mpsc_queue_t) + mutable std::mutex mutex_; - void lock() const { mutex_.lock(); } - void unlock() const { mutex_.unlock(); } + void lock() const { mutex_.lock(); } + void unlock() const { mutex_.unlock(); } #endif - /// Used for queue management, counting number of waiting and ready tasks - std::atomic status; + /// Used for queue management, counting number of waiting and ready tasks + std::atomic status; -// /// Queue containing completed tasks offered into this group. -// /// -// /// The low bits contain the status, the rest of the pointer is the -// /// AsyncTask. -// NaiveQueue readyQueue; - - /// The task currently waiting on `group.next()`. Since only the owning - /// task can ever be waiting on a group, this is just either a reference - /// to that task or null. - std::atomic waitQueue; - - const Metadata *voidType; // TODO: must be Void so just assume it - - friend class ::swift::AsyncTask; - -public: - explicit TaskPoolImpl(const Metadata *T) - : TaskPoolTaskStatusRecord(), - status(PoolStatus::initial().status), -// readyQueue(), - waitQueue(nullptr), - voidType(T) - {} - - TaskPoolTaskStatusRecord *getTaskRecord() { - return reinterpret_cast(this); - } + /// The task currently waiting on `group.next()`. Since only the owning + /// task can ever be waiting on a group, this is just either a reference + /// to that task or null. + std::atomic waitQueue; - /// Destroy the storage associated with the group. - void destroy(); - bool isEmpty() { - auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; - return oldStatus.pendingTasks() == 0; - } + friend class ::swift::AsyncTask; - bool isCancelled() { - auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; - return oldStatus.isCancelled(); - } + public: + const Metadata *voidType; - /// Cancel the task group and all tasks within it. 
- /// - /// Returns `true` if this is the first time cancelling the group, false otherwise. - bool cancelAll(); + explicit TaskPoolImpl(const Metadata *T) + : TaskPoolTaskStatusRecord(), + status(PoolStatus::initial().status), + waitQueue(nullptr), + voidType(T) + {} - PoolStatus statusCancel() { - auto old = status.fetch_or(PoolStatus::cancelled, - std::memory_order_relaxed); - return PoolStatus{old}; - } + TaskPoolTaskStatusRecord *getTaskRecord() { + return reinterpret_cast(this); + } - /// Returns *assumed* new status, including the just performed +1. - PoolStatus statusMarkWaitingAssumeAcquire() { - auto old = status.fetch_or(PoolStatus::waiting, std::memory_order_acquire); - return PoolStatus{old | PoolStatus::waiting}; - } + /// Destroy the storage associated with the group. + void destroy(); - PoolStatus statusRemoveWaiting() { - auto old = status.fetch_and(~PoolStatus::waiting, - std::memory_order_release); - return PoolStatus{old}; - } + bool isEmpty() { + auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; + return oldStatus.pendingTasks() == 0; + } - // NOTE: the following change from a TaskGroup which adds ready; we just remove pending. - // statusAddReadyAssumeAcquire >>>> statusDecrementPendingAssumeAcquire - /// Returns *assumed* new status, including the just performed +1. - PoolStatus statusDecrementPendingAssumeAcquire() { - auto old = status.fetch_sub(PoolStatus::onePendingTask, - std::memory_order_acquire); - return PoolStatus{old - PoolStatus::onePendingTask}; - } + bool isCancelled() { + auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; + return oldStatus.isCancelled(); + } - PoolStatus statusRemovePendingAcquire() { - auto old = status.fetch_add(PoolStatus::onePendingTask, - std::memory_order_acquire); - return PoolStatus{old - PoolStatus::onePendingTask}; - } + /// Cancel the task group and all tasks within it. + /// + /// Returns `true` if this is the first time cancelling the group, false otherwise. + bool cancelAll(); - /// Add a single pending task to the status counter. - /// This is used to implement next() properly, as we need to know if there - /// are pending tasks worth suspending/waiting for or not. - /// - /// Note that the group does *not* store child tasks at all, as they are - /// stored in the `TaskPoolTaskStatusRecord` inside the current task, that - /// is currently executing the group. Here we only need the counts of - /// pending/ready tasks. - /// - /// If the `unconditionally` parameter is `true` the operation always successfully - /// adds a pending task, even if the group is cancelled. If the unconditionally - /// flag is `false`, the added pending count will be *reverted* before returning. - /// This is because we will NOT add a task to a cancelled group, unless doing - /// so unconditionally. - /// - /// Returns *assumed* new status, including the just performed +1. - PoolStatus statusAddPendingTaskRelaxed(bool unconditionally) { - auto old = status.fetch_add(PoolStatus::onePendingTask, - std::memory_order_relaxed); - auto s = PoolStatus{old + PoolStatus::onePendingTask}; + PoolStatus statusCancel() { + auto old = status.fetch_or(PoolStatus::cancelled, + std::memory_order_relaxed); + return PoolStatus{old}; + } + + /// Returns *assumed* new status, including the just performed +1. 
+ PoolStatus statusMarkWaitingAssumeAcquire() { + auto old = status.fetch_or(PoolStatus::waiting, std::memory_order_acquire); + return PoolStatus{old | PoolStatus::waiting}; + } + + PoolStatus statusRemoveWaiting() { + auto old = status.fetch_and(~PoolStatus::waiting, + std::memory_order_release); + return PoolStatus{old}; + } + + /// Decrement the pending task count. + /// Returns *assumed* new status, including the just performed +1. + PoolStatus statusDecrementPendingAssumeAcquire() { + auto old = status.fetch_sub(PoolStatus::onePendingTask, + std::memory_order_acquire); + assert(PoolStatus{old}.pendingTasks() > 0 && "attempted to decrement pending count when it was 0 already"); + return PoolStatus{old - PoolStatus::onePendingTask}; + } + + /// Increment the pending task count. + /// + /// Returns *assumed* new status, including the just performed -1. + PoolStatus statusIncrementPendingAssumeAcquire() { + auto old = status.fetch_add(PoolStatus::onePendingTask, + std::memory_order_acquire); + return PoolStatus{old + PoolStatus::onePendingTask}; + } - if (!unconditionally && s.isCancelled()) { - // revert that add, it was meaningless + /// Similar to decrementing the pending count, however does so 'relaxed'. + /// Used to undo an optimistic increment, when the pool already is cancelled. + /// + /// Returns *assumed* new status, including the just performed -1. + PoolStatus statusUndoIncrementPendingAssumeRelaxed() { auto o = status.fetch_sub(PoolStatus::onePendingTask, std::memory_order_relaxed); - s = PoolStatus{o - PoolStatus::onePendingTask}; + return PoolStatus{o - PoolStatus::onePendingTask}; } - return s; - } + /// Add a single pending task to the status counter. + /// This is used to implement next() properly, as we need to know if there + /// are pending tasks worth suspending/waiting for or not. + /// + /// Note that the group does *not* store child tasks at all, as they are + /// stored in the `TaskPoolTaskStatusRecord` inside the current task, that + /// is currently executing the group. Here we only need the counts of + /// pending/ready tasks. + /// + /// If the `unconditionally` parameter is `true` the operation always successfully + /// adds a pending task, even if the group is cancelled. If the unconditionally + /// flag is `false`, the added pending count will be *reverted* before returning. + /// This is because we will NOT add a task to a cancelled group, unless doing + /// so unconditionally. + /// + /// Returns *assumed* new status, including the just performed +1. + PoolStatus statusAddPendingTaskRelaxed(bool unconditionally) { + auto assumed = statusIncrementPendingAssumeAcquire(); - PoolStatus statusLoadRelaxed() { - return PoolStatus{status.load(std::memory_order_relaxed)}; - } + if (!unconditionally && assumed.isCancelled()) { + // revert that add, it was meaningless + return statusUndoIncrementPendingAssumeRelaxed(); + } + fprintf(stderr, "[%s:%d](%s) status after add: %s\n", __FILE_NAME__, __LINE__, __FUNCTION__, assumed.to_string().c_str()); + return assumed; + } + + PoolStatus statusLoadRelaxed() { + return PoolStatus{status.load(std::memory_order_relaxed)}; + } // /// Compare-and-set old status to a status derived from the old one, // /// by simultaneously decrementing one Pending and one Waiting tasks. @@ -372,31 +358,31 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { // } - /// Offer result of a task into this task pool. 
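For readers skimming the diff, here is a minimal, standalone model of the status word these helpers manipulate: bit 63 is the cancelled flag, bit 62 the waiting flag, and the low 62 bits the pending-task counter, with the optimistic "increment first, undo if the pool was already cancelled" behavior of statusAddPendingTaskRelaxed. Plain C++, no Swift runtime types; the k-prefixed constants and the addPendingTaskRelaxed free function are illustrative names, not runtime symbols.

```
#include <atomic>
#include <bitset>
#include <cassert>
#include <cstdint>
#include <cstdio>

// Same layout as the PoolStatus constants above.
constexpr uint64_t kCancelled      = 1ull << 63;
constexpr uint64_t kWaiting        = 1ull << 62;
constexpr uint64_t kPendingMask    = (1ull << 62) - 1; // 62-bit pending counter
constexpr uint64_t kOnePendingTask = 1;

bool     isCancelled(uint64_t s)    { return (s & kCancelled) != 0; }
bool     hasWaitingTask(uint64_t s) { return (s & kWaiting) != 0; }
uint64_t pendingTasks(uint64_t s)   { return s & kPendingMask; }

// Optimistic add: increment pending, then take it back out only if the pool
// turned out to be cancelled and the caller did not ask for an unconditional add.
uint64_t addPendingTaskRelaxed(std::atomic<uint64_t> &status, bool unconditionally) {
  uint64_t assumed =
      status.fetch_add(kOnePendingTask, std::memory_order_acquire) + kOnePendingTask;
  if (!unconditionally && isCancelled(assumed)) {
    // Revert the add, it was meaningless.
    return status.fetch_sub(kOnePendingTask, std::memory_order_relaxed) - kOnePendingTask;
  }
  return assumed;
}

int main() {
  std::atomic<uint64_t> status{0};          // PoolStatus::initial()
  addPendingTaskRelaxed(status, false);     // addTask: pending = 1
  status.fetch_or(kWaiting);                // waitAll parks: the body is draining
  assert(pendingTasks(status.load()) == 1 && hasWaitingTask(status.load()));

  status.fetch_or(kCancelled);              // cancelAll()
  addPendingTaskRelaxed(status, false);     // reverted: still only 1 pending
  assert(pendingTasks(status.load()) == 1);

  std::printf("status = %s\n", std::bitset<64>(status.load()).to_string().c_str());
  return 0;
}
```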
-  ///
-  /// Unlike a task group, result values are never stored and we immediately
-  /// release the task after decrementing the `pending` count in the pool's status.
-  ///
-  /// If the TaskPool is currently "draining" tasks (i.e. its body has completed),
-  /// there may be a `waiting` task. If so, and this is the last pending task,
-  /// this offer will resume it, allowing the TaskPool to complete and destroy itself.
-  void offer(AsyncTask *completed, AsyncContext *context);
-
-  /// A `TaskPool` is not able to wait on individual completions,
-  /// instead, it can only await on "all pending tasks have been processed".
-  ///
-  ///
-  /// If unable to complete the waiting task immediately (with an readily
-  /// available completed task), either returns an `PollStatus::Empty`
-  /// result if it is known that no pending tasks in the group,
-  /// or a `PollStatus::MustWait` result if there are tasks in flight
-  /// and the waitingTask eventually be woken up by a completion.
-  PollResult waitAll(AsyncTask *waitingTask);
-
-private:
-  /// Enqueue the completed task onto ready queue if there are no waiting tasks yet
-  PoolStatus completeTask(AsyncTask *completedTask);
-};
+    /// Offer the result of a task into this task pool.
+    ///
+    /// Unlike a task group, result values are never stored and we immediately
+    /// release the task after decrementing the `pending` count in the pool's status.
+    ///
+    /// If the TaskPool is currently "draining" tasks (i.e. its body has completed),
+    /// there may be a `waiting` task. If so, and this is the last pending task,
+    /// this offer will resume it, allowing the TaskPool to complete and destroy itself.
+    void offer(AsyncTask *completed, AsyncContext *context);
+
+    /// A `TaskPool` is not able to wait on individual completions;
+    /// instead, it can only await until all pending tasks have been processed.
+    ///
+    /// If unable to complete the waiting task immediately (with a readily
+    /// available completed task), this either returns a `PollStatus::Empty`
+    /// result if it is known that there are no pending tasks in the pool,
+    /// or a `PollStatus::MustWait` result if there are tasks in flight
+    /// and the waitingTask will eventually be woken up by a completion.
+    PollResult waitAll(AsyncTask *waitingTask);
+
+//private:
+//  /// Enqueue the completed task onto ready queue if there are no waiting tasks yet
+//  PoolStatus completeTask(AsyncTask *completedTask);
+  };

 } // end anonymous namespace

@@ -421,7 +407,7 @@ static TaskPool *asAbstract(TaskPoolImpl *group) {
 }

 TaskPoolTaskStatusRecord * TaskPool::getTaskRecord() {
-  return asImpl(this)->getTaskRecord();
+  return asImpl(this)->getTaskRecord();
 }

 // =============================================================================
@@ -429,12 +415,12 @@ TaskPoolTaskStatusRecord * TaskPool::getTaskRecord() {

 // Initializes into the preallocated _pool an actual TaskPoolImpl.
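The offer()/waitAll() contract documented above reduces to a single predicate on the status word: a completing child resumes the parked waiter only when the pool is draining (the waiting bit is set) and this completion dropped the pending count to zero. The exact resume path is not fully visible in these hunks, so the sketch below is an inferred model rather than the runtime code; shouldResumeWaiter and the k-constants are made-up names.

```
#include <cassert>
#include <cstdint>

constexpr uint64_t kWaiting     = 1ull << 62;
constexpr uint64_t kPendingMask = (1ull << 62) - 1;

// `statusAfterCompletion` is the status word after this completion's -1 on
// the pending counter; nothing is ever stored for a Void pool.
bool shouldResumeWaiter(uint64_t statusAfterCompletion) {
  bool poolIsDraining = (statusAfterCompletion & kWaiting) != 0;
  bool wasLastPending = (statusAfterCompletion & kPendingMask) == 0;
  return poolIsDraining && wasLastPending;
}

int main() {
  assert(!shouldResumeWaiter(kWaiting | 2)); // waiter parked, 2 tasks still running
  assert( shouldResumeWaiter(kWaiting));     // waiter parked, this was the last task
  assert(!shouldResumeWaiter(1));            // body still submitting work, nobody waiting
  return 0;
}
```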
SWIFT_CC(swift) -static void swift_taskPool_initializeImpl(TaskPool *group, const Metadata *Void) { - SWIFT_TASK_DEBUG_LOG("creating task group = %p", group); +static void swift_taskPool_initializeImpl(TaskPool *pool, const Metadata *Void) { + SWIFT_TASK_DEBUG_LOG("creating task pool = %p", pool); - TaskPoolImpl *impl = ::new (group) TaskPoolImpl(Void); + TaskPoolImpl *impl = ::new (pool) TaskPoolImpl(Void); auto record = impl->getTaskRecord(); - assert(impl == record && "the group IS the task record"); + assert(impl == record && "the pool IS the task record"); // ok, now that the group actually is initialized: attach it to the task addStatusRecord(record, [&](ActiveTaskStatus parentStatus) { @@ -490,8 +476,8 @@ void TaskPoolImpl::destroy() { SWIFT_TASK_DEBUG_LOG("destroying task group = %p", this); if (!this->isEmpty()) { auto status = this->statusLoadRelaxed(); - SWIFT_TASK_DEBUG_LOG("destroying task group = %p, tasks .ready = %d, .pending = %d", - this, status.readyTasks(), status.pendingTasks()); + SWIFT_TASK_DEBUG_LOG("destroying task group = %p, .pending = %d", + this, status.pendingTasks()); } assert(this->isEmpty() && "Attempted to destroy non-empty task group!"); @@ -543,52 +529,52 @@ bool TaskPool::isCancelled() { static void fillPoolNextVoidResult(TaskFutureWaitAsyncContext *context, const Metadata *voidType, PollResult result) { + fprintf(stderr, "[%s:%d](%s) fill in void\n", __FILE_NAME__, __LINE__, __FUNCTION__); /// Fill in the result value switch (result.status) { - case PollStatus::MustWait: - assert(false && "filling a waiting status?"); - return; + case PollStatus::MustWait: + assert(false && "filling a waiting status?"); + return; - case PollStatus::Error: { - assert(false && "cannot have errors"); - return; - } + case PollStatus::Error: { + assert(false && "cannot have errors"); + return; + } - case PollStatus::Success: { - // Initialize the result as an Optional. -// const Metadata *voidType = nullptr; // result.voidType; // FIXME: should be Void type - OpaqueValue *destPtr = context->successResultPointer; - // TODO: figure out a way to try to optimistically take the - // value out of the finished task's future, if there are no - // remaining references to it. - voidType->vw_initializeWithCopy(destPtr, nullptr); - voidType->vw_storeEnumTagSinglePayload(destPtr, 0, 1); - return; - } + case PollStatus::Success: { + // Initialize the result as an Optional. + // const Metadata *voidType = nullptr; // result.voidType; // FIXME: should be Void type + OpaqueValue *destPtr = context->successResultPointer; + // TODO: figure out a way to try to optimistically take the + // value out of the finished task's future, if there are no + // remaining references to it. + voidType->vw_initializeWithCopy(destPtr, nullptr); + voidType->vw_storeEnumTagSinglePayload(destPtr, 0, 1); + return; + } - case PollStatus::Empty: { - // Initialize the result as a nil Optional. -// const Metadata *voidType = nullptr; // result.voidType; // FIXME: should be Void type - OpaqueValue *destPtr = context->successResultPointer; - voidType->vw_storeEnumTagSinglePayload(destPtr, 1, 1); - return; - } + case PollStatus::Empty: { + // Initialize the result as a nil Optional. 
+ // const Metadata *voidType = nullptr; // result.voidType; // FIXME: should be Void type + OpaqueValue *destPtr = context->successResultPointer; + voidType->vw_storeEnumTagSinglePayload(destPtr, 1, 1); + return; + } } } -// TaskPool is locked upon entry and exit -TaskPoolImpl::PoolStatus TaskPoolImpl::completeTask(AsyncTask *completedTask) { - SWIFT_TASK_DEBUG_LOG("pool does not retain tasks for their results; we're done here = %p", completedTask); - // DO NOT RETAIN THE TASK. - // We know it is Void, so we don't need to store the result; - // By releasing tasks eagerly we're able to keep "infinite" task groups, - // running, that never consume their values. Even more-so, - - return this->statusDecrementPendingAssumeAcquire(); -} +//// TaskPool is locked upon entry and exit +//TaskPoolImpl::PoolStatus TaskPoolImpl::completeTask(AsyncTask *completedTask) { +// SWIFT_TASK_DEBUG_LOG("pool does not retain tasks for their results; we're done here = %p", completedTask); +// // DO NOT RETAIN THE TASK. +// // We know it is Void, so we don't need to store the result; +// // By releasing tasks eagerly we're able to keep "infinite" task groups, +// // running, that never consume their values. Even more-so, +// +// return this->statusDecrementPendingAssumeAcquire(); +//} void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { - fprintf(stderr, "[%s:%d](%s) offer task = %p, pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, completedTask, pool); assert(completedTask); assert(completedTask->isFuture()); assert(completedTask->hasChildFragment()); @@ -612,6 +598,9 @@ void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { hadErrorResult = true; } + SWIFT_TASK_DEBUG_LOG("pool(%p) child task=%p completed, detach", this, completedTask); + _swift_taskPool_detachChild(asAbstract(this), completedTask); + // ==== a) has waiting task. // A TaskPool only has a waiting task while terminating, and that task shall only be resumed once // all tasks have been processed. Only resume the waiting task if this was the last pending task. @@ -689,7 +678,7 @@ void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // ready for it, and will process it immediately without suspending. 
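Because every child of a pool returns Void, a poll result can be fabricated instead of stored: fillPoolNextVoidResult above only writes an enum tag into the caller's Optional<Void> slot, tag 0 for .some(()) on Success and tag 1 for nil on Empty. The toy model below shows just that tag convention; it is not the Swift ABI or the value-witness API, and OptionalVoid/fillVoidResult are stand-in names.

```
#include <cassert>
#include <cstdint>

enum class PollStatus { Success, Empty };

// Stand-in for the caller's Optional<Void> result slot: Void needs no payload
// storage, so only the enum tag remains.
struct OptionalVoid { uint8_t tag; };

void fillVoidResult(OptionalVoid &dest, PollStatus status) {
  dest.tag = (status == PollStatus::Success) ? 0   // .some(()): "a task completed"
                                             : 1;  // nil: "the pool is drained"
}

int main() {
  OptionalVoid r{255};
  fillVoidResult(r, PollStatus::Success);
  assert(r.tag == 0);
  fillVoidResult(r, PollStatus::Empty);
  assert(r.tag == 1);
  return 0;
}
```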
assert(!waitQueue.load(std::memory_order_relaxed)); - completeTask(completedTask); + // completeTask(completedTask); unlock(); // TODO: remove fragment lock, and use status for synchronization } @@ -724,7 +713,7 @@ SWIFT_CC(swiftasync) static void workaround_function_swift_taskPool_waitAllImpl( #endif // ============================================================================= -// ==== group.next() implementation (wait_next and groupPoll) ------------------ +// ==== pool.waitAll() implementation ------------------------------------------ SWIFT_CC(swiftasync) static void swift_taskPool_waitAllImpl( @@ -732,6 +721,7 @@ static void swift_taskPool_waitAllImpl( TaskPool *_pool, ThrowingTaskFutureWaitContinuationFunction *resumeFunction, AsyncContext *rawContext) { + fprintf(stderr, "[%s:%d](%s) wait all; pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, _pool); auto waitingTask = swift_task_getCurrent(); waitingTask->ResumeTask = TASK_POOL_wait_resume_adapter; waitingTask->ResumeContext = rawContext; @@ -744,47 +734,46 @@ static void swift_taskPool_waitAllImpl( context->successResultPointer = resultPointer; auto pool = asImpl(_pool); - assert(pool && "swift_taskPool_waitAll was passed context without group!"); + assert(pool && "swift_taskPool_waitAll was passed context without pool!"); PollResult polled = pool->waitAll(waitingTask); switch (polled.status) { - case PollStatus::MustWait: - SWIFT_TASK_DEBUG_LOG("poll group = %p, no ready tasks, waiting task = %p", - group, waitingTask); - // The waiting task has been queued on the channel, - // there were pending tasks so it will be woken up eventually. + case PollStatus::MustWait: + SWIFT_TASK_DEBUG_LOG("poll pool = %p, no ready tasks, waiting task = %p", + pool, waitingTask); + // The waiting task has been queued on the channel, + // there were pending tasks so it will be woken up eventually. #ifdef __ARM_ARCH_7K__ - return workaround_function_swift_taskPool_waitAllImpl( + return workaround_function_swift_taskPool_waitAllImpl( resultPointer, callerContext, _pool, resumeFunction, rawContext); #else /* __ARM_ARCH_7K__ */ - return; + return; #endif /* __ARM_ARCH_7K__ */ - case PollStatus::Empty: - case PollStatus::Error: - case PollStatus::Success: - SWIFT_TASK_DEBUG_LOG("poll group = %p, task = %p, ready task available = %p", - group, waitingTask, polled.retainedTask); -// if (group->eagerlyReleaseCompleteTasks) { - fillPoolNextVoidResult(context, pool->voidType, polled); + case PollStatus::Empty: + case PollStatus::Error: + case PollStatus::Success: + SWIFT_TASK_DEBUG_LOG("[pool:%p] poll, task = %p", pool, waitingTask); +// if (pool->eagerlyReleaseCompleteTasks) { + fillPoolNextVoidResult(context, pool->voidType, polled); // } else { // fillPoolNextResult(context, polled); // } // if (auto completedTask = polled.retainedTask) { -// // Remove the child from the task group's running tasks list. -// _swift_taskPool_detachChild(asAbstract(group), completedTask); +// // Remove the child from the task pool's running tasks list. +// _swift_taskPool_detachChild(asAbstract(pool), completedTask); // // // Balance the retain done by completeTask. 
// swift_release(completedTask); // } - return waitingTask->runInFullyEstablishedContext(); + return waitingTask->runInFullyEstablishedContext(); } } PollResult TaskPoolImpl::waitAll(AsyncTask *waitingTask) { - lock(); // TODO: remove group lock, and use status for synchronization - SWIFT_TASK_DEBUG_LOG("pool = %p, waitAll pending", this); + lock(); // TODO: remove pool lock, and use status for synchronization + SWIFT_TASK_DEBUG_LOG("[pool:%p], waitAll pending; status = %s", this, statusLoadRelaxed().to_string().c_str()); PollResult result; @@ -792,18 +781,18 @@ PollResult TaskPoolImpl::waitAll(AsyncTask *waitingTask) { bool hasSuspended = false; bool haveRunOneChildTaskInline = false; -reevaluate_if_taskgroup_has_results:; + reevaluate_if_taskpool_has_results:; auto assumed = statusMarkWaitingAssumeAcquire(); // ==== 1) bail out early if no tasks are pending ---------------------------- if (assumed.isEmpty()) { - SWIFT_TASK_DEBUG_LOG("poll group = %p, group is empty, no pending tasks", this); + SWIFT_TASK_DEBUG_LOG("[pool:%p] poll, is empty, no pending tasks", this); // No tasks in flight, we know no tasks were submitted before this poll // was issued, and if we parked here we'd potentially never be woken up. // Bail out and return `nil` from `group.next()`. statusRemoveWaiting(); result.status = PollStatus::Empty; // result.voidType = this->voidType; - unlock(); // TODO: remove group lock, and use status for synchronization + unlock(); // TODO: remove pool lock, and use status for synchronization return result; } @@ -842,7 +831,7 @@ reevaluate_if_taskgroup_has_results:; // We are back to being the parent task and now that we've run the child // task, we should reevaluate parent task _swift_task_setCurrent(oldTask); - goto reevaluate_if_taskgroup_has_results; + goto reevaluate_if_taskpool_has_results; #endif // no ready tasks, so we must wait. result.status = PollStatus::MustWait; @@ -910,6 +899,7 @@ static void swift_task_cancel_pool_child_tasksImpl(TaskPool *pool) { /// owning task of the task group or while holding the owning task's /// status record lock. void swift::_swift_taskPool_cancelAllChildren(TaskPool *pool) { + SWIFT_TASK_DEBUG_LOG("pool(%p) cancel all children tasks", pool); // Because only the owning task of the task group can modify the // child list of a task group status record, and it can only do so // while holding the owning task's status record lock, we do not need @@ -923,6 +913,7 @@ void swift::_swift_taskPool_cancelAllChildren(TaskPool *pool) { SWIFT_CC(swift) static bool swift_taskPool_addPendingImpl(TaskPool *pool, bool unconditionally) { + fprintf(stderr, "[%s:%d](%s) add pending task to pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, pool); auto assumed = asImpl(pool)->statusAddPendingTaskRelaxed(unconditionally); SWIFT_TASK_DEBUG_LOG("add pending %s to pool %p, tasks pending = %d", unconditionally ? "unconditionally" : "", diff --git a/stdlib/public/BackDeployConcurrency/TaskPool.h b/stdlib/public/BackDeployConcurrency/TaskPool.h index 12d973afdabe3..7fde2aa4590cb 100644 --- a/stdlib/public/BackDeployConcurrency/TaskPool.h +++ b/stdlib/public/BackDeployConcurrency/TaskPool.h @@ -27,8 +27,8 @@ namespace swift { -/// The task group is responsible for maintaining dynamically created child tasks. - class alignas(Alignment_TaskPool) TaskPool { +/// The task pool is responsible for maintaining dynamically created child tasks. 
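The TaskPool handle declared in the header below is only preallocated storage: swift_taskPool_initializeImpl above placement-news the real TaskPoolImpl into it, and asImpl/asAbstract reinterpret between the two views. Here is a self-contained sketch of that pattern under assumed names (Handle, Impl, and kNumWords stand in for TaskPool, TaskPoolImpl, and NumWords_TaskPool):

```
#include <cstddef>
#include <new>

// Public, opaque handle: just enough raw words to hold the implementation.
struct Handle {
  static constexpr size_t kNumWords = 32;   // stand-in for NumWords_TaskPool
  void *privateData[kNumWords];
};

// Private implementation that actually lives inside the handle's storage.
struct Impl {
  unsigned long long status = 0;
};

static_assert(sizeof(Impl) <= sizeof(Handle), "impl must fit the reserved words");

Impl   *asImpl(Handle *h)     { return reinterpret_cast<Impl *>(h); }
Handle *asAbstract(Impl *im)  { return reinterpret_cast<Handle *>(im); }

void initialize(Handle *h) {
  ::new (h) Impl();                         // placement-new into the handle
}

int main() {
  Handle h;
  initialize(&h);
  asImpl(&h)->status = 1;
  return asImpl(asAbstract(asImpl(&h)))->status == 1 ? 0 : 1;
}
```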
+class alignas(Alignment_TaskPool) TaskPool { public: // These constructors do not initialize the group instance, and the // destructor does not destroy the group instance; you must call @@ -38,21 +38,21 @@ namespace swift { void *PrivateData[NumWords_TaskPool]; - /// Upon a future task's completion, offer it to the task group it belongs to. + /// Upon a future task's completion, offer it to the task pool it belongs to. void offer(AsyncTask *completed, AsyncContext *context); /// Checks the cancellation status of the group. bool isCancelled(); - // Add a child task to the task group. Always called while holding the - // status record lock of the task group's owning task. + // Add a child task to the task pool. Always called while holding the + // status record lock of the task pool's owning task. void addChildTask(AsyncTask *task); - // Remove a child task from the task group. Always called while holding - // the status record lock of the task group's owning task. + // Remove a child task from the task pool. Always called while holding + // the status record lock of the task pool's owning task. void removeChildTask(AsyncTask *task); - // Provide accessor for task group's status record + // Provide accessor for task pool's status record TaskPoolTaskStatusRecord *getTaskRecord(); }; diff --git a/stdlib/public/BackDeployConcurrency/TaskSleep.swift b/stdlib/public/BackDeployConcurrency/TaskSleep.swift index 4ac6eeb0709cf..6d15acb21f9a0 100644 --- a/stdlib/public/BackDeployConcurrency/TaskSleep.swift +++ b/stdlib/public/BackDeployConcurrency/TaskSleep.swift @@ -238,7 +238,7 @@ extension Task where Success == Never, Failure == Never { let sleepTaskFlags = taskCreateFlags( priority: nil, isChildTask: false, copyTaskLocals: false, inheritContext: false, enqueueJob: false, - addPendingGroupTaskUnconditionally: false) + addPendingTaskUnconditionally: false) let (sleepTask, _) = Builtin.createAsyncTask(sleepTaskFlags) { onSleepWake(wordPtr) } diff --git a/stdlib/public/Concurrency/Task.cpp b/stdlib/public/Concurrency/Task.cpp index 2277f1b4c751c..02f7740be0bcf 100644 --- a/stdlib/public/Concurrency/Task.cpp +++ b/stdlib/public/Concurrency/Task.cpp @@ -739,7 +739,7 @@ static AsyncTaskAndContext swift_task_create_commonImpl( } // Add to the task group or pool, if requested. - if (taskCreateFlags.addPendingGroupTaskUnconditionally()) { // TODO: rename the flag + if (taskCreateFlags.addPendingTaskUnconditionally()) { // TODO: rename the flag if (group) { swift_taskGroup_addPending(group, /*unconditionally=*/true); } else if (pool) { diff --git a/stdlib/public/Concurrency/Task.swift b/stdlib/public/Concurrency/Task.swift index 3b6b47e13ab85..2bb3b445f5dcf 100644 --- a/stdlib/public/Concurrency/Task.swift +++ b/stdlib/public/Concurrency/Task.swift @@ -438,7 +438,7 @@ struct JobFlags { func taskCreateFlags( priority: TaskPriority?, isChildTask: Bool, copyTaskLocals: Bool, inheritContext: Bool, enqueueJob: Bool, - addPendingGroupTaskUnconditionally: Bool + addPendingTaskUnconditionally: Bool ) -> Int { var bits = 0 bits |= (bits & ~0xFF) | Int(priority?.rawValue ?? 
0) @@ -454,7 +454,7 @@ func taskCreateFlags( if enqueueJob { bits |= 1 << 12 } - if addPendingGroupTaskUnconditionally { + if addPendingTaskUnconditionally { bits |= 1 << 13 } return bits @@ -508,7 +508,7 @@ extension Task where Failure == Never { let flags = taskCreateFlags( priority: priority, isChildTask: false, copyTaskLocals: true, inheritContext: true, enqueueJob: true, - addPendingGroupTaskUnconditionally: false) + addPendingTaskUnconditionally: false) // Create the asynchronous task. let (task, _) = Builtin.createAsyncTask(flags, operation) @@ -568,7 +568,7 @@ extension Task where Failure == Error { let flags = taskCreateFlags( priority: priority, isChildTask: false, copyTaskLocals: true, inheritContext: true, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) // Create the asynchronous task future. @@ -627,7 +627,7 @@ extension Task where Failure == Never { let flags = taskCreateFlags( priority: priority, isChildTask: false, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false) + addPendingTaskUnconditionally: false) // Create the asynchronous task future. let (task, _) = Builtin.createAsyncTask(flags, operation) @@ -686,7 +686,7 @@ extension Task where Failure == Error { let flags = taskCreateFlags( priority: priority, isChildTask: false, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) // Create the asynchronous task future. diff --git a/stdlib/public/Concurrency/TaskGroup.swift b/stdlib/public/Concurrency/TaskGroup.swift index 0802ad8b2e59b..a7b8709746f23 100644 --- a/stdlib/public/Concurrency/TaskGroup.swift +++ b/stdlib/public/Concurrency/TaskGroup.swift @@ -243,13 +243,13 @@ public struct TaskGroup { let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: false, - addPendingGroupTaskUnconditionally: true + addPendingTaskUnconditionally: true ) #else let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: true + addPendingTaskUnconditionally: true ) #endif @@ -285,13 +285,13 @@ public struct TaskGroup { let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: false, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) #else let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) #endif @@ -325,7 +325,7 @@ public struct TaskGroup { let flags = taskCreateFlags( priority: nil, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: true + addPendingTaskUnconditionally: true ) // Create the task in this group. @@ -365,7 +365,7 @@ public struct TaskGroup { let flags = taskCreateFlags( priority: nil, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) // Create the task in this group. 
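Only a few of the taskCreateFlags bit positions are visible in these hunks: the priority raw value occupies the low byte, enqueueJob sets bit 12, and the renamed addPendingTaskUnconditionally sets bit 13. The sketch below packs just those three fields, written in C++ for consistency with the other sketches; the remaining Bool parameters use bits not shown here and are deliberately omitted.

```
#include <cassert>
#include <cstdint>
#include <optional>

int64_t taskCreateFlags(std::optional<uint8_t> priority,
                        bool enqueueJob,
                        bool addPendingTaskUnconditionally) {
  int64_t bits = 0;
  bits |= (bits & ~0xFF) | static_cast<int64_t>(priority.value_or(0)); // low byte
  if (enqueueJob)                    bits |= 1 << 12;
  if (addPendingTaskUnconditionally) bits |= 1 << 13;
  return bits;
}

int main() {
  // A default-priority task that is enqueued immediately and counted as
  // pending even if the pool was already cancelled.
  assert(taskCreateFlags(std::nullopt, true, true) == ((1 << 12) | (1 << 13)));
  return 0;
}
```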
@@ -585,7 +585,7 @@ public struct ThrowingTaskGroup { let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: true + addPendingTaskUnconditionally: true ) // Create the task in this group. @@ -623,7 +623,7 @@ public struct ThrowingTaskGroup { let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) // Create the task in this group. @@ -659,7 +659,7 @@ public struct ThrowingTaskGroup { let flags = taskCreateFlags( priority: nil, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: true + addPendingTaskUnconditionally: true ) // Create the task in this group. @@ -702,7 +702,7 @@ public struct ThrowingTaskGroup { let flags = taskCreateFlags( priority: nil, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) // Create the task in this group. diff --git a/stdlib/public/Concurrency/TaskPool.cpp b/stdlib/public/Concurrency/TaskPool.cpp index 2ef26edcc904a..ebf45033c88fc 100644 --- a/stdlib/public/Concurrency/TaskPool.cpp +++ b/stdlib/public/Concurrency/TaskPool.cpp @@ -10,7 +10,7 @@ // //===----------------------------------------------------------------------===// // -// Object management for child tasks that are children of a task group. +// Object management for child tasks that are children of a task pool. // //===----------------------------------------------------------------------===// @@ -61,7 +61,7 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { public: /// Describes the status of the group. enum class ReadyStatus : uintptr_t { - /// The task group is empty, no tasks are pending. + /// The task pool is empty, no tasks are pending. /// Return immediately, there is no point in suspending. /// /// The storage is not accessible. @@ -187,13 +187,13 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { #if SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY || SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL // Synchronization is simple here. In a single threaded mode, all swift tasks // run on a single thread so no coordination is needed. In a task-to-thread - // model, only the parent task which created the task group can + // model, only the parent task which created the task pool can // - // (a) add child tasks to a group + // (a) add child tasks to a pool // (b) run the child tasks // // So we shouldn't need to worry about coordinating between child tasks and - // parents in a task group + // parents in a task pool void lock() const {} void unlock() const {} #else @@ -242,7 +242,7 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { return oldStatus.isCancelled(); } - /// Cancel the task group and all tasks within it. + /// Cancel the task pool and all tasks within it. /// /// Returns `true` if this is the first time cancelling the group, false otherwise. bool cancelAll(); @@ -424,7 +424,7 @@ static void swift_taskPool_initializeImpl(TaskPool *pool, const Metadata *Void) void TaskPool::addChildTask(AsyncTask *child) { SWIFT_TASK_DEBUG_LOG("attach child task = %p to pool = %p", child, this); - // Add the child task to this task group. The corresponding removal + // Add the child task to this task pool. 
The corresponding removal // won't happen until the parent task successfully polls for this child // task, either synchronously in poll (if a task is available // synchronously) or asynchronously in offer (otherwise). In either @@ -432,7 +432,7 @@ void TaskPool::addChildTask(AsyncTask *child) { // The task status record lock is held during this operation, which // prevents us from racing with cancellation or escalation. We don't - // need to acquire the task group lock because the child list is only + // need to acquire the task pool lock because the child list is only // accessed under the task status record lock. auto record = asImpl(this)->getTaskRecord(); record->attachChild(child); @@ -872,9 +872,9 @@ bool TaskPoolImpl::cancelAll() { SWIFT_CC(swift) static void swift_task_cancel_pool_child_tasksImpl(TaskPool *pool) { - // TaskPool is not a Sendable type, and so this operation (which is not - // currently exposed in the API) can only be called from the owning - // task. This satisfies the precondition on cancelAllChildren(). + // TaskPool is not a Sendable type, and so this operation can + // only be called from the owning task. + // This satisfies the precondition on cancelAllChildren(). _swift_taskPool_cancelAllChildren(pool); } @@ -898,7 +898,6 @@ void swift::_swift_taskPool_cancelAllChildren(TaskPool *pool) { SWIFT_CC(swift) static bool swift_taskPool_addPendingImpl(TaskPool *pool, bool unconditionally) { - fprintf(stderr, "[%s:%d](%s) add pending task to pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, pool); auto assumed = asImpl(pool)->statusAddPendingTaskRelaxed(unconditionally); SWIFT_TASK_DEBUG_LOG("add pending %s to pool %p, tasks pending = %d", unconditionally ? "unconditionally" : "", diff --git a/stdlib/public/Concurrency/TaskPool.swift b/stdlib/public/Concurrency/TaskPool.swift index ff8b0f86babee..168ed385ae207 100644 --- a/stdlib/public/Concurrency/TaskPool.swift +++ b/stdlib/public/Concurrency/TaskPool.swift @@ -23,18 +23,39 @@ import Darwin /// submitting work using child tasks, where the results of those child tasks do not need to be collected. /// /// A task pool cannot be iterated over and its child tasks cannot be awaited on explicitly. +/// /// Task pool tasks are immediately removed from the pool as soon as they complete, /// this is the primary difference from a task pool which stores results (and thus retains the results), /// until they are consumed. /// -/// Similarly to a `TaskGroup` a `TaskPool` awaits all tasks that are submitted to it before returning +/// Similarly to a ``TaskGroup`` a `TaskPool` awaits all tasks that are submitted to it before returning /// from the `withTaskPool` call. /// -/// Task Group Cancellation +/// Typical usage of a ``TaskPool`` to handle a stream of incoming requests may look something like this: +/// +/// ``` +/// try await withTaskPool() { pool in +/// for try await request in self.requests { // may throw on cancellation +/// pool.addTaskUnlessCancelled { +/// await handler.handle(request, response: responseWriter) // handler is responsible for writing response +/// } +/// } +/// } +/// ``` +/// +/// It is typical for servers to want to consume incoming requests as fast as possible, and feed them into the +/// some `handler` that will perform the actual invocation of user-code and write the response back to the network. 
+/// +/// > Note: Note that a `TaskPool` does not guarantee execution order of tasks added using `addTask`, +/// > they are subject to the same scheduling rules as a `TaskGroup`, and should be used in cases where parallelism +/// > is desired, rather than a strict one-by-one execution, which can be achieved by not creating additional tasks +/// > for the processing of the requests. +/// +/// Task Pool Cancellation /// ======================= /// /// You can cancel a task pool and all of its child tasks -/// by calling the `cancelAll()` method on the task pool, +/// by calling the ``cancelAll()`` method on the task pool, /// or by canceling the task in which the pool is running. /// /// If you call `addTask(priority:operation:)` to create a new task in a canceled pool, @@ -117,18 +138,18 @@ public struct TaskPool { priority: TaskPriority? = nil, operation: __owned @Sendable @escaping () async -> Void ) { -#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInGroup +#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInPool #if SWIFT_STDLIB_TASK_TO_THREAD_MODEL_CONCURRENCY let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: false, - addPendingGroupTaskUnconditionally: true + addPendingTaskUnconditionally: true ) #else let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: true + addPendingTaskUnconditionally: true ) #endif @@ -153,7 +174,7 @@ public struct TaskPool { priority: TaskPriority? = nil, operation: __owned @Sendable @escaping () async -> Void ) -> Bool { -#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInGroup +#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInPool let canAdd = _taskPoolAddPendingTask(pool: _pool, unconditionally: false) guard canAdd else { @@ -164,18 +185,18 @@ public struct TaskPool { let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: false, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) #else let flags = taskCreateFlags( priority: priority, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) #endif // Create the task in this pool. - _ = Builtin.createAsyncTaskInGroup(flags, _pool, operation) + _ = Builtin.createAsyncTaskInPool(flags, _pool, operation) return true #else @@ -200,15 +221,15 @@ public struct TaskPool { public mutating func addTask( operation: __owned @Sendable @escaping () async -> Void ) { -#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInGroup +#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInPool let flags = taskCreateFlags( priority: nil, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: true + addPendingTaskUnconditionally: true ) // Create the task in this pool. 
- _ = Builtin.createAsyncTaskInGroup(flags, _pool, operation) + _ = Builtin.createAsyncTaskInPool(flags, _pool, operation) #else fatalError("Unsupported Swift compiler") #endif @@ -233,7 +254,7 @@ public struct TaskPool { public mutating func addTaskUnlessCancelled( operation: __owned @Sendable @escaping () async -> Void ) -> Bool { -#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInGroup +#if compiler(>=5.5) && $BuiltinCreateAsyncTaskInPool let canAdd = _taskPoolAddPendingTask(pool: _pool, unconditionally: false) guard canAdd else { @@ -244,7 +265,7 @@ public struct TaskPool { let flags = taskCreateFlags( priority: nil, isChildTask: true, copyTaskLocals: false, inheritContext: false, enqueueJob: true, - addPendingGroupTaskUnconditionally: false + addPendingTaskUnconditionally: false ) // Create the task in this pool. diff --git a/stdlib/public/Concurrency/TaskSleep.swift b/stdlib/public/Concurrency/TaskSleep.swift index 043d4a3d31b9d..b9ea34713d312 100644 --- a/stdlib/public/Concurrency/TaskSleep.swift +++ b/stdlib/public/Concurrency/TaskSleep.swift @@ -240,7 +240,7 @@ extension Task where Success == Never, Failure == Never { let sleepTaskFlags = taskCreateFlags( priority: nil, isChildTask: false, copyTaskLocals: false, inheritContext: false, enqueueJob: false, - addPendingGroupTaskUnconditionally: false) + addPendingTaskUnconditionally: false) let (sleepTask, _) = Builtin.createAsyncTask(sleepTaskFlags) { onSleepWake(wordPtr) } diff --git a/stdlib/public/Concurrency/TaskSleepDuration.swift b/stdlib/public/Concurrency/TaskSleepDuration.swift index 8b7629d333ecc..4839de1af441d 100644 --- a/stdlib/public/Concurrency/TaskSleepDuration.swift +++ b/stdlib/public/Concurrency/TaskSleepDuration.swift @@ -58,7 +58,7 @@ extension Task where Success == Never, Failure == Never { let sleepTaskFlags = taskCreateFlags( priority: nil, isChildTask: false, copyTaskLocals: false, inheritContext: false, enqueueJob: false, - addPendingGroupTaskUnconditionally: false) + addPendingTaskUnconditionally: false) let (sleepTask, _) = Builtin.createAsyncTask(sleepTaskFlags) { onSleepWake(wordPtr) } diff --git a/test/Concurrency/Runtime/async_taskpool_dontLeakTasks.swift b/test/Concurrency/Runtime/async_taskpool_dontLeakTasks.swift new file mode 100644 index 0000000000000..e160ccd504fb1 --- /dev/null +++ b/test/Concurrency/Runtime/async_taskpool_dontLeakTasks.swift @@ -0,0 +1,49 @@ +// RUN: %target-run-simple-swift( -Xfrontend -disable-availability-checking -parse-as-library) 2>&1 | %FileCheck %s --dump-input=always +// REQUIRES: executable_test +// REQUIRES: concurrency +// REQUIRES: swift_task_debug_log + +// REQUIRES: concurrency_runtime +// UNSUPPORTED: back_deployment_runtime + +#if os(Linux) +import Glibc +#elseif os(Windows) +import MSVCRT +#else +import Darwin +#endif + +func test_taskPool_next() async { + // CHECK: creating task [[MAIN_TASK:0x.*]] with parent 0x0 + // CHECK: creating task [[POOL_TASK_1:0x.*]] with parent [[MAIN_TASK]] + // CHECK: creating task [[POOL_TASK_2:0x.*]] with parent [[MAIN_TASK]] + // CHECK: creating task [[POOL_TASK_3:0x.*]] with parent [[MAIN_TASK]] + // CHECK: creating task [[POOL_TASK_4:0x.*]] with parent [[MAIN_TASK]] + // CHECK: creating task [[POOL_TASK_5:0x.*]] with parent [[MAIN_TASK]] + + _ = await withTaskPool(returning: Int.self) { pool in + for n in 0..<5 { + pool.addTask { + return n + } + } + return 0 + } + // as we exit the pool, it must be guaranteed that its child tasks were destroyed + // + // NOTE: there is no great way to express "any of POOL_TASK_n", + // so we 
just check that 5 tasks were destroyed + // + // CHECK: destroy task [[DESTROY_POOL_TASK_1:0x.*]] + // CHECK: destroy task [[DESTROY_POOL_TASK_2:0x.*]] + // CHECK: destroy task [[DESTROY_POOL_TASK_3:0x.*]] + // CHECK: destroy task [[DESTROY_POOL_TASK_4:0x.*]] + // CHECK: destroy task [[DESTROY_POOL_TASK_5:0x.*]] +} + +@main struct Main { + static func main() async { + await test_taskPool_next() + } +} From e5c78c727a094ddc84ed91a70390398c1d2de6ef Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Mon, 28 Nov 2022 12:43:23 +0900 Subject: [PATCH 5/8] fix compile error --- stdlib/public/Concurrency/TaskGroup.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/stdlib/public/Concurrency/TaskGroup.cpp b/stdlib/public/Concurrency/TaskGroup.cpp index 238217a17d620..8315793070c40 100644 --- a/stdlib/public/Concurrency/TaskGroup.cpp +++ b/stdlib/public/Concurrency/TaskGroup.cpp @@ -575,9 +575,6 @@ static void fillGroupNextResult(TaskFutureWaitAsyncContext *context, case PollStatus::MustWait: assert(false && "filling a waiting status?"); return; - case PollStatus::Empty: - assert(false && "filling from empty status?"); - return; case PollStatus::Error: { context->fillWithError(reinterpret_cast(result.storage)); From 4232bcb10efe4a0e87893f27b89cd7ff3baca197 Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Mon, 28 Nov 2022 12:44:04 +0900 Subject: [PATCH 6/8] [TaskPool] explicitly handle overflow; though this matters more for the group --- stdlib/public/Concurrency/TaskPool.cpp | 64 ++++++++++++++++++-------- 1 file changed, 46 insertions(+), 18 deletions(-) diff --git a/stdlib/public/Concurrency/TaskPool.cpp b/stdlib/public/Concurrency/TaskPool.cpp index ebf45033c88fc..db79a01ad52e0 100644 --- a/stdlib/public/Concurrency/TaskPool.cpp +++ b/stdlib/public/Concurrency/TaskPool.cpp @@ -94,31 +94,17 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { /// The result of waiting on the TaskPoolImpl. struct PollResult { - PollStatus status; // TODO: pack it into storage pointer or not worth it? + PollStatus status; - static PollResult get(AsyncTask *asyncTask, bool hadErrorResult) { + static PollResult get(AsyncTask *asyncTask) { // A TaskPool task is always Void, so we don't even have to collect the result from its future fragment. return PollResult{ - /*status*/ hadErrorResult ? - PollStatus::Error : - PollStatus::Success + /*status*/ PollStatus::Success }; } }; - /// An item within the pending queue. - struct PendingQueueItem { - AsyncTask * const storage; - - AsyncTask *getTask() const { - return storage; - } - - static PendingQueueItem get(AsyncTask *task) { - assert(task == nullptr || task->isFuture()); - return PendingQueueItem{task}; - } - }; + const size_t TaskPoolMaximumPendingTasks = 0b0011111111111111111111111111111111111111111111111111111111111111; struct PoolStatus { static const uint64_t cancelled = 0b1000000000000000000000000000000000000000000000000000000000000000; @@ -183,6 +169,43 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { }; }; + // ============================================================================= + // ==== Checks ----------------------------------------------------------------- + + static void _taskPool_reportPendingTaskOverflow(TaskPoolImpl *pool, uint64_t pendingCount) { + + char *message; + swift_asprintf( + &message, + "error: task-pool: detected pending task overflow, in task pool %p! 
Pending task count: %ull", + pool, pendingCount); + + if (_swift_shouldReportFatalErrorsToDebugger()) { + RuntimeErrorDetails details = { + .version = RuntimeErrorDetails::currentVersion, + .errorType = "task-pool-violation", + .currentStackDescription = "Task-pool exceeded supported pending task count", + .framesToSkip = 1, + }; + _swift_reportToDebugger(RuntimeErrorFlagFatal, message, &details); + } + +#if defined(_WIN32) + #define STDERR_FILENO 2 + _write(STDERR_FILENO, message, strlen(message)); +#else + write(STDERR_FILENO, message, strlen(message)); +#endif +#if defined(__APPLE__) + asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", message); +#elif defined(__ANDROID__) + __android_log_print(ANDROID_LOG_FATAL, "SwiftRuntime", "%s", message); +#endif + + free(message); + abort(); + } + private: #if SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY || SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL // Synchronization is simple here. In a single threaded mode, all swift tasks @@ -312,6 +335,11 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { PoolStatus statusAddPendingTaskRelaxed(bool unconditionally) { auto assumed = statusIncrementPendingAssumeAcquire(); + if (assumed.pendingTasks() == TaskPoolMaximumPendingTasks) { + _taskPool_reportPendingTaskOverflow(this, assumed.pendingTasks()); + llvm_unreachable(); // reportOverflow will abort(); + } + if (!unconditionally && assumed.isCancelled()) { // revert that add, it was meaningless return statusUndoIncrementPendingAssumeRelaxed(); From 201c3c7c4790eea31fa76a61df9e5be464bd58f2 Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Mon, 28 Nov 2022 12:44:27 +0900 Subject: [PATCH 7/8] cleanups --- stdlib/public/Concurrency/TaskPool.cpp | 117 +++++------------------ stdlib/public/Concurrency/TaskStatus.cpp | 3 - 2 files changed, 23 insertions(+), 97 deletions(-) diff --git a/stdlib/public/Concurrency/TaskPool.cpp b/stdlib/public/Concurrency/TaskPool.cpp index db79a01ad52e0..3161e8e39f7b7 100644 --- a/stdlib/public/Concurrency/TaskPool.cpp +++ b/stdlib/public/Concurrency/TaskPool.cpp @@ -234,8 +234,7 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { /// task can ever be waiting on a group, this is just either a reference /// to that task or null. std::atomic waitQueue; - - + friend class ::swift::AsyncTask; public: @@ -256,13 +255,11 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { void destroy(); bool isEmpty() { - auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; - return oldStatus.pendingTasks() == 0; + return statusLoadRelaxed().pendingTasks() == 0; } bool isCancelled() { - auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; - return oldStatus.isCancelled(); + return statusLoadRelaxed().isCancelled(); } /// Cancel the task pool and all tasks within it. @@ -278,11 +275,12 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { /// Returns *assumed* new status, including the just performed +1. 
PoolStatus statusMarkWaitingAssumeAcquire() { - auto old = status.fetch_or(PoolStatus::waiting, std::memory_order_acquire); + auto old = status.fetch_or(PoolStatus::waiting, + std::memory_order_acquire); return PoolStatus{old | PoolStatus::waiting}; } - PoolStatus statusRemoveWaiting() { + PoolStatus statusRemoveWaitingRelease() { auto old = status.fetch_and(~PoolStatus::waiting, std::memory_order_release); return PoolStatus{old}; @@ -352,25 +350,6 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { return PoolStatus{status.load(std::memory_order_relaxed)}; } -// /// Compare-and-set old status to a status derived from the old one, -// /// by simultaneously decrementing one Pending and one Waiting tasks. -// /// -// /// This is used to atomically perform a waiting task completion. -// bool statusCompletePendingReadyWaiting(PoolStatus &old) { -// return status.compare_exchange_strong( -// old.status, old.completingPendingReadyWaiting().status, -// /*success*/ std::memory_order_relaxed, -// /*failure*/ std::memory_order_relaxed); -// } -// -// bool statusCompletePendingReady(PoolStatus &old) { -// return status.compare_exchange_strong( -// old.status, old.completingPendingReady().status, -// /*success*/ std::memory_order_relaxed, -// /*failure*/ std::memory_order_relaxed); -// } - - /// Offer result of a task into this task pool. /// /// Unlike a task group, result values are never stored and we immediately @@ -392,9 +371,6 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { /// and the waitingTask eventually be woken up by a completion. PollResult waitAll(AsyncTask *waitingTask); -//private: -// /// Enqueue the completed task onto ready queue if there are no waiting tasks yet -// PoolStatus completeTask(AsyncTask *completedTask); }; } // end anonymous namespace @@ -429,7 +405,7 @@ TaskPoolTaskStatusRecord * TaskPool::getTaskRecord() { // Initializes into the preallocated _pool an actual TaskPoolImpl. SWIFT_CC(swift) static void swift_taskPool_initializeImpl(TaskPool *pool, const Metadata *Void) { - SWIFT_TASK_DEBUG_LOG("creating task pool = %p", pool); + SWIFT_TASK_DEBUG_LOG("pool(%p) create", pool); TaskPoolImpl *impl = ::new (pool) TaskPoolImpl(Void); auto record = impl->getTaskRecord(); @@ -452,16 +428,14 @@ static void swift_taskPool_initializeImpl(TaskPool *pool, const Metadata *Void) void TaskPool::addChildTask(AsyncTask *child) { SWIFT_TASK_DEBUG_LOG("attach child task = %p to pool = %p", child, this); - // Add the child task to this task pool. The corresponding removal - // won't happen until the parent task successfully polls for this child - // task, either synchronously in poll (if a task is available - // synchronously) or asynchronously in offer (otherwise). In either - // case, the work ends up being non-concurrent with the parent task. + // Add the child task to this task pool. + // + // The corresponding removal WILL happen concurrently and must be synchronized + // using the task record lock of the child's parent. This is different from a + // task group, where removals are non-concurrent to their parent! - // The task status record lock is held during this operation, which - // prevents us from racing with cancellation or escalation. We don't - // need to acquire the task pool lock because the child list is only - // accessed under the task status record lock. + // Since calls to addChildTask must be holding the task status record lock, + // we can proceed to attach the child without additional locking here. 
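A simplified, standalone model of the locking rule described just above (and relied upon by _swift_taskPool_detachChild later in this patch): the child list is owned by the parent task's status record, so the attach performed here and the concurrent detach performed from offer() both go through that single parent-side lock, never through a lock on the pool itself. std::mutex and std::list stand in for the real runtime structures.

```
#include <list>
#include <mutex>

struct ChildTask {};

struct ParentStatusRecord {
  std::mutex statusRecordLock; // stand-in for withStatusRecordLock(parent, ...)
  std::list<ChildTask *> children;

  void attachChild(ChildTask *child) {
    std::lock_guard<std::mutex> held(statusRecordLock);
    children.push_back(child);
  }

  // May run on the completing child's thread, concurrently with the parent.
  void detachChild(ChildTask *child) {
    std::lock_guard<std::mutex> held(statusRecordLock);
    children.remove(child);
  }
};

int main() {
  ParentStatusRecord parent;
  ChildTask child;
  parent.attachChild(&child);
  parent.detachChild(&child);
  return 0;
}
```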
auto record = asImpl(this)->getTaskRecord(); record->attachChild(child); } @@ -516,29 +490,6 @@ bool TaskPool::isCancelled() { return asImpl(this)->isCancelled(); } -//static void fillPoolNextResult(TaskFutureWaitAsyncContext *context, -// PollResult result) { -// /// Fill in the result value -// switch (result.status) { -// case PollStatus::MustWait: -// assert(false && "filling a waiting status?"); -// return; -// -// case PollStatus::Error: { -// context->fillWithError(reinterpret_cast(result.storage)); -// return; -// } -// -// case PollStatus::Success: { -// // Initialize the result as an Optional. -// const Metadata *voidType = result.voidType; -// OpaqueValue *destPtr = context->successResultPointer; -// voidType->vw_storeEnumTagSinglePayload(destPtr, 1, 1); -// return; -// } -// } -//} - static void fillPoolNextVoidResult(TaskFutureWaitAsyncContext *context, const Metadata *voidType, PollResult result) { @@ -576,17 +527,6 @@ static void fillPoolNextVoidResult(TaskFutureWaitAsyncContext *context, } } -//// TaskPool is locked upon entry and exit -//TaskPoolImpl::PoolStatus TaskPoolImpl::completeTask(AsyncTask *completedTask) { -// SWIFT_TASK_DEBUG_LOG("pool does not retain tasks for their results; we're done here = %p", completedTask); -// // DO NOT RETAIN THE TASK. -// // We know it is Void, so we don't need to store the result; -// // By releasing tasks eagerly we're able to keep "infinite" task groups, -// // running, that never consume their values. Even more-so, -// -// return this->statusDecrementPendingAssumeAcquire(); -//} - void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { assert(completedTask); assert(completedTask->isFuture()); @@ -734,8 +674,8 @@ static void swift_taskPool_waitAllImpl( TaskPool *_pool, ThrowingTaskFutureWaitContinuationFunction *resumeFunction, AsyncContext *rawContext) { - fprintf(stderr, "[%s:%d](%s) wait all; pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, _pool); - auto waitingTask = swift_task_getCurrent(); + SWIFT_TASK_DEBUG_LOG("pool(%p) waitAll, no ready tasks, waiting task = %p", + pool, waitingTask); auto waitingTask = swift_task_getCurrent(); waitingTask->ResumeTask = TASK_POOL_wait_resume_adapter; waitingTask->ResumeContext = rawContext; @@ -752,7 +692,7 @@ static void swift_taskPool_waitAllImpl( PollResult polled = pool->waitAll(waitingTask); switch (polled.status) { case PollStatus::MustWait: - SWIFT_TASK_DEBUG_LOG("poll pool = %p, no ready tasks, waiting task = %p", + SWIFT_TASK_DEBUG_LOG("pool(%p) poll, pending tasks exist, waiting task = %p", pool, waitingTask); // The waiting task has been queued on the channel, // there were pending tasks so it will be woken up eventually. @@ -766,28 +706,17 @@ static void swift_taskPool_waitAllImpl( case PollStatus::Empty: case PollStatus::Error: case PollStatus::Success: - SWIFT_TASK_DEBUG_LOG("[pool:%p] poll, task = %p", pool, waitingTask); -// if (pool->eagerlyReleaseCompleteTasks) { + /// Anything else than a "MustWait" can be treated as a successful poll. + /// Only if there are in flight pending tasks do we need to wait after all. + SWIFT_TASK_DEBUG_LOG("[pool:%p] poll successful, waiting task = %p", pool, waitingTask); fillPoolNextVoidResult(context, pool->voidType, polled); -// } else { -// fillPoolNextResult(context, polled); -// } -// if (auto completedTask = polled.retainedTask) { -// // Remove the child from the task pool's running tasks list. -// _swift_taskPool_detachChild(asAbstract(pool), completedTask); -// -// // Balance the retain done by completeTask. 
-// swift_release(completedTask); -// } return waitingTask->runInFullyEstablishedContext(); } } PollResult TaskPoolImpl::waitAll(AsyncTask *waitingTask) { - lock(); // TODO: remove pool lock, and use status for synchronization SWIFT_TASK_DEBUG_LOG("[pool:%p], waitAll pending; status = %s", this, statusLoadRelaxed().to_string().c_str()); - PollResult result; // Have we suspended the task? @@ -802,16 +731,16 @@ reevaluate_if_taskpool_has_results:; // No tasks in flight, we know no tasks were submitted before this poll // was issued, and if we parked here we'd potentially never be woken up. // Bail out and return `nil` from `group.next()`. - statusRemoveWaiting(); + statusRemoveWaitingRelease(); result.status = PollStatus::Empty; // result.voidType = this->voidType; - unlock(); // TODO: remove pool lock, and use status for synchronization return result; } + lock(); // TODO: remove pool lock, and use status for synchronization auto waitHead = waitQueue.load(std::memory_order_acquire); - // ==== 3) Add to wait queue ------------------------------------------------- + // ==== 2) Add to wait queue ------------------------------------------------- _swift_tsan_release(static_cast(waitingTask)); while (true) { if (!hasSuspended) { diff --git a/stdlib/public/Concurrency/TaskStatus.cpp b/stdlib/public/Concurrency/TaskStatus.cpp index d43720232bfe5..ae828ccf9d855 100644 --- a/stdlib/public/Concurrency/TaskStatus.cpp +++ b/stdlib/public/Concurrency/TaskStatus.cpp @@ -450,9 +450,6 @@ void swift::_swift_taskGroup_detachChild(TaskGroup *group, // FIXME: this is not actually right; is it? are we guaranteeing locking right in a pool? void swift::_swift_taskPool_detachChild(TaskPool *pool, AsyncTask *child) { - // We are called synchronously from the perspective of the owning task. - // That doesn't necessarily mean the owning task *is* the current task, - // though, just that it's not concurrently running. 
auto parent = child->childFragment()->getParent(); withStatusRecordLock(parent, LockContext::OnTask, [&](ActiveTaskStatus &parentStatus) { From c8ab46ae3fd79b2a8e4b5e2d9ca7bd938eaf97ce Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Mon, 28 Nov 2022 12:49:19 +0900 Subject: [PATCH 8/8] missing includes, cleanups, wrong assertions --- stdlib/public/BackDeployConcurrency/Task.cpp | 3 - .../public/BackDeployConcurrency/TaskPool.cpp | 229 +++++++----------- stdlib/public/Concurrency/Task.cpp | 5 - stdlib/public/Concurrency/TaskPool.cpp | 52 ++-- .../async_taskpool_dontLeakTasks.swift | 7 +- 5 files changed, 112 insertions(+), 184 deletions(-) diff --git a/stdlib/public/BackDeployConcurrency/Task.cpp b/stdlib/public/BackDeployConcurrency/Task.cpp index e5cf4c9996b37..3dc10bd3759a7 100644 --- a/stdlib/public/BackDeployConcurrency/Task.cpp +++ b/stdlib/public/BackDeployConcurrency/Task.cpp @@ -148,7 +148,6 @@ FutureFragment::Status AsyncTask::waitFuture(AsyncTask *waitingTask, } void NullaryContinuationJob::process(Job *_job) { - fprintf(stderr, "[%s:%d](%s) process job!\n", __FILE_NAME__, __LINE__, __FUNCTION__); auto *job = cast(_job); auto *task = job->Task; @@ -163,8 +162,6 @@ void NullaryContinuationJob::process(Job *_job) { } void AsyncTask::completeFuture(AsyncContext *context) { - fprintf(stderr, "[%s:%d](%s) complete future!\n", __FILE_NAME__, __LINE__, __FUNCTION__); - using Status = FutureFragment::Status; using WaitQueueItem = FutureFragment::WaitQueueItem; SWIFT_TASK_DEBUG_LOG("complete future = %p", this); diff --git a/stdlib/public/BackDeployConcurrency/TaskPool.cpp b/stdlib/public/BackDeployConcurrency/TaskPool.cpp index 6860e616ff1d0..6d9e75de66569 100644 --- a/stdlib/public/BackDeployConcurrency/TaskPool.cpp +++ b/stdlib/public/BackDeployConcurrency/TaskPool.cpp @@ -33,21 +33,16 @@ #include #include -#if !SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY -#include +#if defined(__APPLE__) +#include +#elif defined(__ANDROID__) +#include #endif -#include -#if SWIFT_CONCURRENCY_ENABLE_DISPATCH -#include +#if defined(_WIN32) +#include #endif -#if !defined(_WIN32) && !defined(__wasi__) && __has_include() -#include -#endif - -using namespace swift; - #if !SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY #include #endif @@ -76,7 +71,7 @@ namespace { public: /// Describes the status of the group. enum class ReadyStatus : uintptr_t { - /// The task group is empty, no tasks are pending. + /// The task pool is empty, no tasks are pending. /// Return immediately, there is no point in suspending. /// /// The storage is not accessible. @@ -109,31 +104,17 @@ namespace { /// The result of waiting on the TaskPoolImpl. struct PollResult { - PollStatus status; // TODO: pack it into storage pointer or not worth it? + PollStatus status; - static PollResult get(AsyncTask *asyncTask, bool hadErrorResult) { + static PollResult get(AsyncTask *asyncTask) { // A TaskPool task is always Void, so we don't even have to collect the result from its future fragment. return PollResult{ - /*status*/ hadErrorResult ? - PollStatus::Error : - PollStatus::Success + /*status*/ PollStatus::Success }; } }; - /// An item within the pending queue. 
- struct PendingQueueItem { - AsyncTask * const storage; - - AsyncTask *getTask() const { - return storage; - } - - static PendingQueueItem get(AsyncTask *task) { - assert(task == nullptr || task->isFuture()); - return PendingQueueItem{task}; - } - }; + const size_t TaskPoolMaximumPendingTasks = 0b0011111111111111111111111111111111111111111111111111111111111111; struct PoolStatus { static const uint64_t cancelled = 0b1000000000000000000000000000000000000000000000000000000000000000; @@ -198,17 +179,54 @@ namespace { }; }; + // ============================================================================= + // ==== Checks ----------------------------------------------------------------- + + static void _taskPool_reportPendingTaskOverflow(TaskPoolImpl *pool, uint64_t pendingCount) { + + char *message; + swift_asprintf( + &message, + "error: task-pool: detected pending task overflow, in task pool %p! Pending task count: %llu", + pool, pendingCount); + + if (_swift_shouldReportFatalErrorsToDebugger()) { + RuntimeErrorDetails details = { + .version = RuntimeErrorDetails::currentVersion, + .errorType = "task-pool-violation", + .currentStackDescription = "Task-pool exceeded supported pending task count", + .framesToSkip = 1, + }; + _swift_reportToDebugger(RuntimeErrorFlagFatal, message, &details); + } + +#if defined(_WIN32) + #define STDERR_FILENO 2 + _write(STDERR_FILENO, message, strlen(message)); +#else + write(STDERR_FILENO, message, strlen(message)); +#endif +#if defined(__APPLE__) + asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", message); +#elif defined(__ANDROID__) + __android_log_print(ANDROID_LOG_FATAL, "SwiftRuntime", "%s", message); +#endif + + free(message); + abort(); + } + private: #if SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY || SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL // Synchronization is simple here. In a single threaded mode, all swift tasks // run on a single thread so no coordination is needed. In a task-to-thread - // model, only the parent task which created the task group can + // model, only the parent task which created the task pool can // - // (a) add child tasks to a group + // (a) add child tasks to a pool // (b) run the child tasks // // So we shouldn't need to worry about coordinating between child tasks and - // parents in a task group + // parents in a task pool void lock() const {} void unlock() const {} #else @@ -227,7 +245,6 @@ namespace { /// to that task or null. std::atomic waitQueue; - friend class ::swift::AsyncTask; public: @@ -248,16 +265,14 @@ namespace { void destroy(); bool isEmpty() { - auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; - return oldStatus.pendingTasks() == 0; + return statusLoadRelaxed().pendingTasks() == 0; } bool isCancelled() { - auto oldStatus = PoolStatus{status.load(std::memory_order_relaxed)}; - return oldStatus.isCancelled(); + return statusLoadRelaxed().isCancelled(); } - /// Cancel the task group and all tasks within it. + /// Cancel the task pool and all tasks within it. /// /// Returns `true` if this is the first time cancelling the group, false otherwise. bool cancelAll(); @@ -270,11 +285,12 @@ namespace { /// Returns *assumed* new status, including the just performed +1. 
PoolStatus statusMarkWaitingAssumeAcquire() { - auto old = status.fetch_or(PoolStatus::waiting, std::memory_order_acquire); + auto old = status.fetch_or(PoolStatus::waiting, + std::memory_order_acquire); return PoolStatus{old | PoolStatus::waiting}; } - PoolStatus statusRemoveWaiting() { + PoolStatus statusRemoveWaitingRelease() { auto old = status.fetch_and(~PoolStatus::waiting, std::memory_order_release); return PoolStatus{old}; @@ -327,11 +343,15 @@ namespace { PoolStatus statusAddPendingTaskRelaxed(bool unconditionally) { auto assumed = statusIncrementPendingAssumeAcquire(); + if (assumed.pendingTasks() == TaskPoolMaximumPendingTasks) { + _taskPool_reportPendingTaskOverflow(this, assumed.pendingTasks()); + abort(); // reportOverflow will abort(); + } + if (!unconditionally && assumed.isCancelled()) { // revert that add, it was meaningless return statusUndoIncrementPendingAssumeRelaxed(); } - fprintf(stderr, "[%s:%d](%s) status after add: %s\n", __FILE_NAME__, __LINE__, __FUNCTION__, assumed.to_string().c_str()); return assumed; } @@ -339,25 +359,6 @@ namespace { return PoolStatus{status.load(std::memory_order_relaxed)}; } -// /// Compare-and-set old status to a status derived from the old one, -// /// by simultaneously decrementing one Pending and one Waiting tasks. -// /// -// /// This is used to atomically perform a waiting task completion. -// bool statusCompletePendingReadyWaiting(PoolStatus &old) { -// return status.compare_exchange_strong( -// old.status, old.completingPendingReadyWaiting().status, -// /*success*/ std::memory_order_relaxed, -// /*failure*/ std::memory_order_relaxed); -// } -// -// bool statusCompletePendingReady(PoolStatus &old) { -// return status.compare_exchange_strong( -// old.status, old.completingPendingReady().status, -// /*success*/ std::memory_order_relaxed, -// /*failure*/ std::memory_order_relaxed); -// } - - /// Offer result of a task into this task pool. /// /// Unlike a task group, result values are never stored and we immediately @@ -379,9 +380,6 @@ namespace { /// and the waitingTask eventually be woken up by a completion. PollResult waitAll(AsyncTask *waitingTask); -//private: -// /// Enqueue the completed task onto ready queue if there are no waiting tasks yet -// PoolStatus completeTask(AsyncTask *completedTask); }; } // end anonymous namespace @@ -416,7 +414,7 @@ TaskPoolTaskStatusRecord * TaskPool::getTaskRecord() { // Initializes into the preallocated _pool an actual TaskPoolImpl. SWIFT_CC(swift) static void swift_taskPool_initializeImpl(TaskPool *pool, const Metadata *Void) { - SWIFT_TASK_DEBUG_LOG("creating task pool = %p", pool); + SWIFT_TASK_DEBUG_LOG("pool(%p) create", pool); TaskPoolImpl *impl = ::new (pool) TaskPoolImpl(Void); auto record = impl->getTaskRecord(); @@ -439,16 +437,14 @@ static void swift_taskPool_initializeImpl(TaskPool *pool, const Metadata *Void) void TaskPool::addChildTask(AsyncTask *child) { SWIFT_TASK_DEBUG_LOG("attach child task = %p to pool = %p", child, this); - // Add the child task to this task group. The corresponding removal - // won't happen until the parent task successfully polls for this child - // task, either synchronously in poll (if a task is available - // synchronously) or asynchronously in offer (otherwise). In either - // case, the work ends up being non-concurrent with the parent task. + // Add the child task to this task pool. + // + // The corresponding removal WILL happen concurrently and must be synchronized + // using the task record lock of the child's parent. 
This is different from a + // task group, where removals are non-concurrent to their parent! - // The task status record lock is held during this operation, which - // prevents us from racing with cancellation or escalation. We don't - // need to acquire the task group lock because the child list is only - // accessed under the task status record lock. + // Since calls to addChildTask must be holding the task status record lock, + // we can proceed to attach the child without additional locking here. auto record = asImpl(this)->getTaskRecord(); record->attachChild(child); } @@ -503,33 +499,9 @@ bool TaskPool::isCancelled() { return asImpl(this)->isCancelled(); } -//static void fillPoolNextResult(TaskFutureWaitAsyncContext *context, -// PollResult result) { -// /// Fill in the result value -// switch (result.status) { -// case PollStatus::MustWait: -// assert(false && "filling a waiting status?"); -// return; -// -// case PollStatus::Error: { -// context->fillWithError(reinterpret_cast(result.storage)); -// return; -// } -// -// case PollStatus::Success: { -// // Initialize the result as an Optional. -// const Metadata *voidType = result.voidType; -// OpaqueValue *destPtr = context->successResultPointer; -// voidType->vw_storeEnumTagSinglePayload(destPtr, 1, 1); -// return; -// } -// } -//} - static void fillPoolNextVoidResult(TaskFutureWaitAsyncContext *context, const Metadata *voidType, PollResult result) { - fprintf(stderr, "[%s:%d](%s) fill in void\n", __FILE_NAME__, __LINE__, __FUNCTION__); /// Fill in the result value switch (result.status) { case PollStatus::MustWait: @@ -563,17 +535,6 @@ static void fillPoolNextVoidResult(TaskFutureWaitAsyncContext *context, } } -//// TaskPool is locked upon entry and exit -//TaskPoolImpl::PoolStatus TaskPoolImpl::completeTask(AsyncTask *completedTask) { -// SWIFT_TASK_DEBUG_LOG("pool does not retain tasks for their results; we're done here = %p", completedTask); -// // DO NOT RETAIN THE TASK. -// // We know it is Void, so we don't need to store the result; -// // By releasing tasks eagerly we're able to keep "infinite" task groups, -// // running, that never consume their values. Even more-so, -// -// return this->statusDecrementPendingAssumeAcquire(); -//} - void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { assert(completedTask); assert(completedTask->isFuture()); @@ -587,18 +548,15 @@ void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // Immediately decrement the pending count; we do not keep track of "ready" tasks and never store them; // This is different from a task group, which has to keep the pending count and add +1 "ready" when offered to. auto assumed = statusDecrementPendingAssumeAcquire(); - SWIFT_TASK_DEBUG_LOG("pool %p, remaining pending: %d", this, assumed.pendingTasks()); + SWIFT_TASK_DEBUG_LOG("pool(%p), remaining pending: %d", this, assumed.pendingTasks()); auto asyncContextPrefix = reinterpret_cast( reinterpret_cast(context) - sizeof(FutureAsyncContextPrefix)); - bool hadErrorResult = false; - auto errorObject = asyncContextPrefix->errorResult; - if (errorObject) { - // instead, we need to enqueue this result: - hadErrorResult = true; - } SWIFT_TASK_DEBUG_LOG("pool(%p) child task=%p completed, detach", this, completedTask); + /// We're offering concurrently to the parent task (which owns the pool), and must remove + /// the child from the pool record's children container. + /// The _swift_taskPool_detachChild will take the parent task lock to perform the child removal. 
_swift_taskPool_detachChild(asAbstract(this), completedTask); // ==== a) has waiting task. @@ -639,7 +597,7 @@ void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { #else /* SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL */ // Run the task. - auto result = PollResult::get(completedTask, hadErrorResult); + auto result = PollResult::get(completedTask); unlock(); // TODO: remove fragment lock, and use status for synchronization @@ -721,7 +679,6 @@ static void swift_taskPool_waitAllImpl( TaskPool *_pool, ThrowingTaskFutureWaitContinuationFunction *resumeFunction, AsyncContext *rawContext) { - fprintf(stderr, "[%s:%d](%s) wait all; pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, _pool); auto waitingTask = swift_task_getCurrent(); waitingTask->ResumeTask = TASK_POOL_wait_resume_adapter; waitingTask->ResumeContext = rawContext; @@ -736,10 +693,13 @@ static void swift_taskPool_waitAllImpl( auto pool = asImpl(_pool); assert(pool && "swift_taskPool_waitAll was passed context without pool!"); + SWIFT_TASK_DEBUG_LOG("pool(%p) waitAll, no ready tasks, waiting task = %p", + pool, waitingTask); + PollResult polled = pool->waitAll(waitingTask); switch (polled.status) { case PollStatus::MustWait: - SWIFT_TASK_DEBUG_LOG("poll pool = %p, no ready tasks, waiting task = %p", + SWIFT_TASK_DEBUG_LOG("pool(%p) poll, pending tasks exist, waiting task = %p", pool, waitingTask); // The waiting task has been queued on the channel, // there were pending tasks so it will be woken up eventually. @@ -753,28 +713,17 @@ static void swift_taskPool_waitAllImpl( case PollStatus::Empty: case PollStatus::Error: case PollStatus::Success: - SWIFT_TASK_DEBUG_LOG("[pool:%p] poll, task = %p", pool, waitingTask); -// if (pool->eagerlyReleaseCompleteTasks) { + /// Anything else than a "MustWait" can be treated as a successful poll. + /// Only if there are in flight pending tasks do we need to wait after all. + SWIFT_TASK_DEBUG_LOG("[pool:%p] poll successful, waiting task = %p", pool, waitingTask); fillPoolNextVoidResult(context, pool->voidType, polled); -// } else { -// fillPoolNextResult(context, polled); -// } -// if (auto completedTask = polled.retainedTask) { -// // Remove the child from the task pool's running tasks list. -// _swift_taskPool_detachChild(asAbstract(pool), completedTask); -// -// // Balance the retain done by completeTask. -// swift_release(completedTask); -// } return waitingTask->runInFullyEstablishedContext(); } } PollResult TaskPoolImpl::waitAll(AsyncTask *waitingTask) { - lock(); // TODO: remove pool lock, and use status for synchronization SWIFT_TASK_DEBUG_LOG("[pool:%p], waitAll pending; status = %s", this, statusLoadRelaxed().to_string().c_str()); - PollResult result; // Have we suspended the task? @@ -789,16 +738,15 @@ PollResult TaskPoolImpl::waitAll(AsyncTask *waitingTask) { // No tasks in flight, we know no tasks were submitted before this poll // was issued, and if we parked here we'd potentially never be woken up. // Bail out and return `nil` from `group.next()`. 
- statusRemoveWaiting(); + statusRemoveWaitingRelease(); result.status = PollStatus::Empty; - // result.voidType = this->voidType; - unlock(); // TODO: remove pool lock, and use status for synchronization return result; } + lock(); // TODO: remove pool lock, and use status for synchronization auto waitHead = waitQueue.load(std::memory_order_acquire); - // ==== 3) Add to wait queue ------------------------------------------------- + // ==== 2) Add to wait queue ------------------------------------------------- _swift_tsan_release(static_cast(waitingTask)); while (true) { if (!hasSuspended) { @@ -887,9 +835,9 @@ bool TaskPoolImpl::cancelAll() { SWIFT_CC(swift) static void swift_task_cancel_pool_child_tasksImpl(TaskPool *pool) { - // TaskPool is not a Sendable type, and so this operation (which is not - // currently exposed in the API) can only be called from the owning - // task. This satisfies the precondition on cancelAllChildren(). + // TaskPool is not a Sendable type, and so this operation can + // only be called from the owning task. + // This satisfies the precondition on cancelAllChildren(). _swift_taskPool_cancelAllChildren(pool); } @@ -913,7 +861,6 @@ void swift::_swift_taskPool_cancelAllChildren(TaskPool *pool) { SWIFT_CC(swift) static bool swift_taskPool_addPendingImpl(TaskPool *pool, bool unconditionally) { - fprintf(stderr, "[%s:%d](%s) add pending task to pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, pool); auto assumed = asImpl(pool)->statusAddPendingTaskRelaxed(unconditionally); SWIFT_TASK_DEBUG_LOG("add pending %s to pool %p, tasks pending = %d", unconditionally ? "unconditionally" : "", diff --git a/stdlib/public/Concurrency/Task.cpp b/stdlib/public/Concurrency/Task.cpp index 02f7740be0bcf..a787e52400cd6 100644 --- a/stdlib/public/Concurrency/Task.cpp +++ b/stdlib/public/Concurrency/Task.cpp @@ -462,8 +462,6 @@ static void completeTaskImpl(AsyncTask *task, if (task->isFuture()) { task->completeFuture(context); } - - fprintf(stderr, "[%s:%d](%s) completeTaskImpl, task = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, task); } /// The function that we put in the context of a simple task @@ -696,14 +694,12 @@ static AsyncTaskAndContext swift_task_create_commonImpl( case TaskOptionRecordKind::TaskGroup: group = cast(option)->getGroup(); - fprintf(stderr, "[%s:%d](%s) make pool child task; group = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, group); assert(group && "Missing group"); jobFlags.task_setIsGroupChildTask(true); break; case TaskOptionRecordKind::TaskPool: pool = cast(option)->getPool(); - fprintf(stderr, "[%s:%d](%s) make pool child task; pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, pool); assert(pool && "Missing pool"); jobFlags.task_setIsPoolChildTask(true); break; @@ -940,7 +936,6 @@ static AsyncTaskAndContext swift_task_create_commonImpl( // Initialize the pool child fragment if applicable. 
if (pool) { - fprintf(stderr, "[%s:%d](%s) add new task = %p, to pool = %p\n", __FILE_NAME__, __LINE__, __FUNCTION__, task, pool); auto poolChildFragment = task->poolChildFragment(); ::new (poolChildFragment) AsyncTask::PoolChildFragment(pool); } diff --git a/stdlib/public/Concurrency/TaskPool.cpp b/stdlib/public/Concurrency/TaskPool.cpp index 3161e8e39f7b7..e363dceeb1ddc 100644 --- a/stdlib/public/Concurrency/TaskPool.cpp +++ b/stdlib/public/Concurrency/TaskPool.cpp @@ -33,6 +33,16 @@ #include #include +#if defined(__APPLE__) +#include +#elif defined(__ANDROID__) +#include +#endif + +#if defined(_WIN32) +#include +#endif + #if !SWIFT_STDLIB_SINGLE_THREADED_CONCURRENCY #include #endif @@ -173,11 +183,10 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { // ==== Checks ----------------------------------------------------------------- static void _taskPool_reportPendingTaskOverflow(TaskPoolImpl *pool, uint64_t pendingCount) { - char *message; swift_asprintf( &message, - "error: task-pool: detected pending task overflow, in task pool %p! Pending task count: %ull", + "error: task-pool: detected pending task overflow, in task pool %p! Pending task count: %llu", pool, pendingCount); if (_swift_shouldReportFatalErrorsToDebugger()) { @@ -335,14 +344,13 @@ class TaskPoolImpl: public TaskPoolTaskStatusRecord { if (assumed.pendingTasks() == TaskPoolMaximumPendingTasks) { _taskPool_reportPendingTaskOverflow(this, assumed.pendingTasks()); - llvm_unreachable(); // reportOverflow will abort(); + abort(); // reportOverflow will abort(); } if (!unconditionally && assumed.isCancelled()) { // revert that add, it was meaningless return statusUndoIncrementPendingAssumeRelaxed(); } - fprintf(stderr, "[%s:%d](%s) status after add: %s\n", __FILE_NAME__, __LINE__, __FUNCTION__, assumed.to_string().c_str()); return assumed; } @@ -493,7 +501,6 @@ bool TaskPool::isCancelled() { static void fillPoolNextVoidResult(TaskFutureWaitAsyncContext *context, const Metadata *voidType, PollResult result) { - fprintf(stderr, "[%s:%d](%s) fill in void\n", __FILE_NAME__, __LINE__, __FUNCTION__); /// Fill in the result value switch (result.status) { case PollStatus::MustWait: @@ -540,18 +547,15 @@ void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // Immediately decrement the pending count; we do not keep track of "ready" tasks and never store them; // This is different from a task group, which has to keep the pending count and add +1 "ready" when offered to. auto assumed = statusDecrementPendingAssumeAcquire(); - SWIFT_TASK_DEBUG_LOG("pool %p, remaining pending: %d", this, assumed.pendingTasks()); + SWIFT_TASK_DEBUG_LOG("pool(%p), remaining pending: %d", this, assumed.pendingTasks()); auto asyncContextPrefix = reinterpret_cast( reinterpret_cast(context) - sizeof(FutureAsyncContextPrefix)); - bool hadErrorResult = false; - auto errorObject = asyncContextPrefix->errorResult; - if (errorObject) { - // instead, we need to enqueue this result: - hadErrorResult = true; - } SWIFT_TASK_DEBUG_LOG("pool(%p) child task=%p completed, detach", this, completedTask); + /// We're offering concurrently to the parent task (which owns the pool), and must remove + /// the child from the pool record's children container. + /// The _swift_taskPool_detachChild will take the parent task lock to perform the child removal. _swift_taskPool_detachChild(asAbstract(this), completedTask); // ==== a) has waiting task. 
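For reference, here is a minimal Swift model of the status word manipulated by the hunks above. This is illustrative only, not the runtime's actual type: it assumes the layout cancelled = bit 63, waiting = bit 62, pending count = bits 0..<62, which is what the TaskPoolMaximumPendingTasks mask and the overflow guard in statusAddPendingTaskRelaxed suggest.

// Illustrative model only; assumed layout: cancelled = bit 63, waiting = bit 62,
// pending count = bits 0..<62 (matching TaskPoolMaximumPendingTasks above).
struct ModelPoolStatus {
    static let cancelled: UInt64 = 1 << 63
    static let waiting: UInt64 = 1 << 62
    static let maximumPendingTasks: UInt64 = (1 << 62) - 1

    var bits: UInt64

    var isCancelled: Bool { bits & Self.cancelled != 0 }
    var hasWaitingTask: Bool { bits & Self.waiting != 0 }
    var pendingTasks: UInt64 { bits & Self.maximumPendingTasks }

    // Mirrors the guard in statusAddPendingTaskRelaxed: once the pending count
    // reaches the maximum, the runtime reports an overflow and aborts.
    var isAtPendingLimit: Bool { pendingTasks == Self.maximumPendingTasks }

    // Mirrors offer(): completing a Void child only decrements the pending
    // count; no result value is retained or enqueued anywhere.
    mutating func completePendingChild() { bits -= 1 }
}

// Example: a pool with a suspended waiter and three pending children.
var status = ModelPoolStatus(bits: ModelPoolStatus.waiting | 3)
assert(status.hasWaitingTask && status.pendingTasks == 3 && !status.isAtPendingLimit)
status.completePendingChild()
assert(status.pendingTasks == 2)
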
@@ -564,7 +568,6 @@ void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { while (true) { // ==== a) run waiting task directly ------------------------------------- assert(assumed.hasWaitingTask()); - assert(assumed.pendingTasks() && "offered to pool with no pending tasks!"); // We are the "first" completed task to arrive, // and since there is a task waiting we immediately claim and complete it. if (waitQueue.compare_exchange_strong( @@ -586,13 +589,12 @@ void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { // we can't just have the parent task set itself up as a waiter. // But since it's what we're doing, we basically take the same // path as we would if there wasn't a waiter. -// completeTask(completedTask); unlock(); // TODO: remove fragment lock, and use status for synchronization return; #else /* SWIFT_CONCURRENCY_TASK_TO_THREAD_MODEL */ // Run the task. - auto result = PollResult::get(completedTask, hadErrorResult); + auto result = PollResult::get(completedTask); unlock(); // TODO: remove fragment lock, and use status for synchronization @@ -623,18 +625,9 @@ void TaskPoolImpl::offer(AsyncTask *completedTask, AsyncContext *context) { } } llvm_unreachable("should have enqueued and returned."); - } else { - // ==== b) enqueue completion ------------------------------------------------ - // - // else, no-one was waiting (yet), so we have to instead enqueue to the message - // queue when a task polls during next() it will notice that we have a value - // ready for it, and will process it immediately without suspending. - assert(!waitQueue.load(std::memory_order_relaxed)); - - // completeTask(completedTask); - unlock(); // TODO: remove fragment lock, and use status for synchronization - } + } // else, there still are pending tasks; so there is nothing to wake up just yet + unlock(); // TODO: remove fragment lock, and use status for synchronization return; } @@ -674,8 +667,7 @@ static void swift_taskPool_waitAllImpl( TaskPool *_pool, ThrowingTaskFutureWaitContinuationFunction *resumeFunction, AsyncContext *rawContext) { - SWIFT_TASK_DEBUG_LOG("pool(%p) waitAll, no ready tasks, waiting task = %p", - pool, waitingTask); auto waitingTask = swift_task_getCurrent(); + auto waitingTask = swift_task_getCurrent(); waitingTask->ResumeTask = TASK_POOL_wait_resume_adapter; waitingTask->ResumeContext = rawContext; @@ -689,6 +681,9 @@ static void swift_taskPool_waitAllImpl( auto pool = asImpl(_pool); assert(pool && "swift_taskPool_waitAll was passed context without pool!"); + SWIFT_TASK_DEBUG_LOG("pool(%p) waitAll, no ready tasks, waiting task = %p", + pool, waitingTask); + PollResult polled = pool->waitAll(waitingTask); switch (polled.status) { case PollStatus::MustWait: @@ -733,7 +728,6 @@ reevaluate_if_taskpool_has_results:; // Bail out and return `nil` from `group.next()`. 
statusRemoveWaitingRelease(); result.status = PollStatus::Empty; - // result.voidType = this->voidType; return result; } diff --git a/test/Concurrency/Runtime/async_taskpool_dontLeakTasks.swift b/test/Concurrency/Runtime/async_taskpool_dontLeakTasks.swift index e160ccd504fb1..992a25f31bce9 100644 --- a/test/Concurrency/Runtime/async_taskpool_dontLeakTasks.swift +++ b/test/Concurrency/Runtime/async_taskpool_dontLeakTasks.swift @@ -16,16 +16,11 @@ import Darwin func test_taskPool_next() async { // CHECK: creating task [[MAIN_TASK:0x.*]] with parent 0x0 - // CHECK: creating task [[POOL_TASK_1:0x.*]] with parent [[MAIN_TASK]] - // CHECK: creating task [[POOL_TASK_2:0x.*]] with parent [[MAIN_TASK]] - // CHECK: creating task [[POOL_TASK_3:0x.*]] with parent [[MAIN_TASK]] - // CHECK: creating task [[POOL_TASK_4:0x.*]] with parent [[MAIN_TASK]] - // CHECK: creating task [[POOL_TASK_5:0x.*]] with parent [[MAIN_TASK]] _ = await withTaskPool(returning: Int.self) { pool in for n in 0..<5 { pool.addTask { - return n + print("run \(n))") } } return 0
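
Taken together with the runtime changes above, the intended usage pattern looks roughly like the following sketch. It reuses the names from the test above (withTaskPool(returning:), addTask); the exact public Swift surface is not part of this excerpt, so treat the shape as an assumption rather than the final API. The point it illustrates: children always produce Void, the pool releases each completed child immediately instead of retaining it for a later next() call, and returning from the body awaits the remaining pending children (presumably via the waitAll path above).

func runManyVoidTasks() async {
    _ = await withTaskPool(returning: Int.self) { pool in
        for n in 0..<1_000 {
            pool.addTask {
                // Void work only; the pool never stores this child's result,
                // so completed children can be released eagerly.
                print("finished \(n)")
            }
        }
        // No explicit consumption loop: pending children are awaited when the
        // body returns, and none of them are retained by the pool.
        return 0
    }
}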