Skip to content

Commit e445ff6

Browse files
tomut and awesomekling
authored and committed
Kernel: Implement an asynchronous device request stack
This allows issuing asynchronous requests for devices and waiting on the completion of the request. The requests can cascade into multiple sub-requests. Since IRQs may complete at any time, if the current process is no longer the same one that started the request, we need to switch the paging context before accessing user buffers. Change the PATA driver to use this model.
1 parent 91db318 commit e445ff6

17 files changed

+809
-245
lines changed

Kernel/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ set(KERNEL_SOURCES
1414
CMOS.cpp
1515
CommandLine.cpp
1616
Console.cpp
17+
Devices/AsyncDeviceRequest.cpp
1718
Devices/BXVGADevice.cpp
1819
Devices/BlockDevice.cpp
1920
Devices/CharacterDevice.cpp
Lines changed: 175 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,175 @@
1+
/*
2+
* Copyright (c) 2020, The SerenityOS developers.
3+
* All rights reserved.
4+
*
5+
* Redistribution and use in source and binary forms, with or without
6+
* modification, are permitted provided that the following conditions are met:
7+
*
8+
* 1. Redistributions of source code must retain the above copyright notice, this
9+
* list of conditions and the following disclaimer.
10+
*
11+
* 2. Redistributions in binary form must reproduce the above copyright notice,
12+
* this list of conditions and the following disclaimer in the documentation
13+
* and/or other materials provided with the distribution.
14+
*
15+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18+
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21+
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22+
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23+
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25+
*/
26+
27+
#include <Kernel/Devices/AsyncDeviceRequest.h>
28+
#include <Kernel/Devices/Device.h>
29+
30+
namespace Kernel {
31+
32+
// Record the target device and the process that created the request.
// The process reference is kept because completion may run in another
// process's context (e.g. from an IRQ); the buffer accessors in the
// header use it to switch paging contexts before touching user memory.
AsyncDeviceRequest::AsyncDeviceRequest(Device& device)
    : m_device(device)
    , m_process(*Process::current())
{
}
37+
38+
AsyncDeviceRequest::~AsyncDeviceRequest()
{
    {
        ScopedSpinLock lock(m_lock);
        // A request must have reached a terminal state (Success/Failure/
        // MemoryFault/Cancelled) before it may be destroyed.
        ASSERT(is_completed_result(m_result));
        ASSERT(m_sub_requests_pending.is_empty());
    }

    // We should not need any locking here anymore. The destructor should
    // only be called after either wait() or cancel() (once implemented) returned.
    // At that point no sub-request should be adding more requests and all
    // sub-requests should be completed (either succeeded, failed, or cancelled).
    // Which means there should be no more pending sub-requests and the
    // entire AsyncDeviceRequest hierarchy should be immutable.
    for (auto& sub_request : m_sub_requests_complete) {
        ASSERT(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
        ASSERT(sub_request.m_parent_request == this);
        // Detach the sub-request so it doesn't hold a dangling parent pointer.
        sub_request.m_parent_request = nullptr;
    }
}
58+
59+
// Common completion tail: propagate to the parent (if any), let the device
// pick up its next queued request, then wake waiters. The order matters:
// waiters must not be woken before the parent has accounted for this
// sub-request's completion.
void AsyncDeviceRequest::request_finished()
{
    if (m_parent_request)
        m_parent_request->sub_request_finished(*this);

    // Trigger processing the next request
    m_device.process_next_queued_request({}, *this);

    // Wake anyone who may be waiting
    m_queue.wake_all();
}
70+
71+
// Block the calling thread until this request reaches a terminal state,
// or until the (optional) timeout expires. Only valid on a top-level
// request; sub-requests are waited on through their parent.
auto AsyncDeviceRequest::wait(timeval* timeout) -> RequestWaitResult
{
    ASSERT(!m_parent_request);
    // Fast path: already finished, no need to block at all.
    if (auto result = get_request_result(); is_completed_result(result))
        return { result, Thread::BlockResult::NotBlocked };
    auto block_result = Thread::current()->wait_on(m_queue, name(), timeout);
    // Re-read the result after waking; the wait may also have timed out.
    return { get_request_result(), block_result };
}
80+
81+
// Snapshot the current request state under the lock; the state may change
// again immediately after this returns unless it is already terminal.
auto AsyncDeviceRequest::get_request_result() const -> RequestResult
{
    ScopedSpinLock lock(m_lock);
    return m_result;
}
86+
87+
// Attach a sub-request to this request. If this request is already running,
// the sub-request is started right away; otherwise it will be started when
// this request starts.
void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request)
{
    // Sub-requests cannot be for the same device
    ASSERT(&m_device != &sub_request->m_device);
    ASSERT(sub_request->m_parent_request == nullptr);
    sub_request->m_parent_request = this;

    bool start_immediately = false;
    {
        ScopedSpinLock lock(m_lock);
        // Must not add sub-requests to a request that already finished.
        ASSERT(!is_completed_result(m_result));
        m_sub_requests_pending.append(sub_request);
        if (m_result == Started)
            start_immediately = true;
    }
    // Start outside the lock to avoid holding it across start().
    if (start_immediately)
        sub_request->do_start();
}
104+
105+
// Called by a sub-request when it reaches a terminal state. Moves the
// sub-request from the pending list to the complete list; once the last
// pending sub-request finishes, aggregates the sub-request results into
// this request's result and runs the completion tail.
//
// BUGFIX: the previous version asserted `index < m_sub_requests_pending.size()`
// *after* removing the entry, so the assertion fired whenever the finished
// sub-request was the last element of the pending list (the vector had
// already shrunk) — in particular every time the final sub-request
// completed. We now assert on an explicit "found" flag instead.
void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
{
    bool all_completed;
    {
        ScopedSpinLock lock(m_lock);
        ASSERT(m_result == Started);

        // Move the sub-request from pending to complete, keeping a strong
        // reference alive across the removal.
        bool found = false;
        for (size_t i = 0; i < m_sub_requests_pending.size(); i++) {
            if (&m_sub_requests_pending[i] == &sub_request) {
                NonnullRefPtr<AsyncDeviceRequest> request(m_sub_requests_pending[i]);
                m_sub_requests_pending.remove(i);
                m_sub_requests_complete.append(move(request));
                found = true;
                break;
            }
        }
        ASSERT(found);

        all_completed = m_sub_requests_pending.is_empty();
        if (all_completed) {
            // Aggregate any errors
            bool any_failures = false;
            bool any_memory_faults = false;
            for (size_t i = 0; i < m_sub_requests_complete.size(); i++) {
                auto& completed_request = m_sub_requests_complete[i];
                auto sub_result = completed_request.get_request_result();
                ASSERT(is_completed_result(sub_result));
                switch (sub_result) {
                case Failure:
                    any_failures = true;
                    break;
                case MemoryFault:
                    any_memory_faults = true;
                    break;
                default:
                    break;
                }
                if (any_failures && any_memory_faults)
                    break; // Stop checking if all error conditions were found
            }
            // Failure takes precedence over MemoryFault when aggregating.
            if (any_failures)
                m_result = Failure;
            else if (any_memory_faults)
                m_result = MemoryFault;
            else
                m_result = Success;
        }
    }
    // Run the completion tail outside the lock.
    if (all_completed)
        request_finished();
}
154+
155+
// Mark this request as finished with the given terminal result and run the
// completion tail. May be called from an IRQ handler, in which case the
// completion work is deferred out of IRQ context.
void AsyncDeviceRequest::complete(RequestResult result)
{
    // Only terminal results may be reported here.
    ASSERT(result == Success || result == Failure || result == MemoryFault);
    ScopedCritical critical;
    {
        ScopedSpinLock lock(m_lock);
        ASSERT(m_result == Started);
        m_result = result;
    }
    if (Processor::current().in_irq()) {
        // Completion (parent notification, waking waiters) must not run in
        // IRQ context, so defer it. The ref/unref pair keeps this request
        // alive until the deferred call has executed.
        ref(); // Make sure we don't get freed
        Processor::deferred_call_queue([this]() {
            request_finished();
            unref();
        });
    } else {
        request_finished();
    }
}
174+
175+
}
Lines changed: 175 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,175 @@
1+
/*
2+
* Copyright (c) 2020, The SerenityOS developers.
3+
* All rights reserved.
4+
*
5+
* Redistribution and use in source and binary forms, with or without
6+
* modification, are permitted provided that the following conditions are met:
7+
*
8+
* 1. Redistributions of source code must retain the above copyright notice, this
9+
* list of conditions and the following disclaimer.
10+
*
11+
* 2. Redistributions in binary form must reproduce the above copyright notice,
12+
* this list of conditions and the following disclaimer in the documentation
13+
* and/or other materials provided with the distribution.
14+
*
15+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18+
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19+
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20+
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21+
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22+
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23+
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25+
*/
26+
27+
#pragma once
28+
29+
#include <AK/NonnullRefPtrVector.h>
30+
#include <Kernel/Process.h>
31+
#include <Kernel/Thread.h>
32+
#include <Kernel/UserOrKernelBuffer.h>
33+
#include <Kernel/VM/ProcessPagingScope.h>
34+
#include <Kernel/WaitQueue.h>
35+
36+
namespace Kernel {
37+
38+
class Device;
39+
40+
// Base class for asynchronous device requests. A request is reference
// counted, can be waited on with an optional timeout, and can cascade into
// sub-requests issued against other devices. Because completion may happen
// at any time (e.g. from an IRQ, while a different process is current), the
// buffer accessors below switch into the originating process's paging
// context before touching user memory.
class AsyncDeviceRequest : public RefCounted<AsyncDeviceRequest> {
    AK_MAKE_NONCOPYABLE(AsyncDeviceRequest);
    AK_MAKE_NONMOVABLE(AsyncDeviceRequest);

public:
    // Request life-cycle states. Values greater than Started are terminal
    // (see is_completed_result()), so the enumerator order is significant.
    enum RequestResult {
        Pending = 0, // created, not yet started
        Started,     // start() has been invoked
        Success,
        Failure,
        MemoryFault, // a user-buffer access faulted
        Cancelled
    };

    // Result of wait(): the request's state plus how the wait itself ended
    // (blocked, not blocked, timed out, ...).
    class RequestWaitResult {
        friend class AsyncDeviceRequest;

    public:
        RequestResult request_result() const { return m_request_result; }
        Thread::BlockResult wait_result() const { return m_wait_result; }

    private:
        RequestWaitResult(RequestResult request_result, Thread::BlockResult wait_result)
            : m_request_result(request_result)
            , m_wait_result(wait_result)
        {
        }

        RequestResult m_request_result;
        Thread::BlockResult m_wait_result;
    };

    virtual ~AsyncDeviceRequest();

    // Human-readable request name, used e.g. as the wait-queue reason.
    virtual const char* name() const = 0;
    // Kick off the actual device operation; implemented by subclasses.
    virtual void start() = 0;

    // Attach a sub-request (must target a different device).
    void add_sub_request(NonnullRefPtr<AsyncDeviceRequest>);

    // Block until the request completes (or the timeout expires).
    [[nodiscard]] RequestWaitResult wait(timeval* = nullptr);

    // Badge-protected entry point so only Device can start queued requests.
    void do_start(Badge<Device>)
    {
        do_start();
    }

    // Report a terminal result; safe to call from an IRQ handler.
    void complete(RequestResult result);

    // Opaque per-request pointer for the driver's own bookkeeping. The
    // assert allows setting a value or clearing it, but not overwriting
    // one non-null value with another.
    void set_private(void* priv)
    {
        ASSERT(!m_private || !priv);
        m_private = priv;
    }
    void* get_private() const { return m_private; }

    // Write to a user-or-kernel buffer, switching into the originating
    // process's paging context if we're currently in a different one.
    template<typename... Args>
    [[nodiscard]] bool write_to_buffer(UserOrKernelBuffer& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.write(forward<Args>(args)...);
        ProcessPagingScope paging_scope(m_process);
        return buffer.write(forward<Args>(args)...);
    }

    // Buffered variant of write_to_buffer() using a BUFFER_BYTES-sized
    // intermediate buffer.
    template<size_t BUFFER_BYTES, typename... Args>
    [[nodiscard]] bool write_to_buffer_buffered(UserOrKernelBuffer& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...);
        ProcessPagingScope paging_scope(m_process);
        return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...);
    }

    // Read from a user-or-kernel buffer, switching paging context if needed.
    template<typename... Args>
    [[nodiscard]] bool read_from_buffer(const UserOrKernelBuffer& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.read(forward<Args>(args)...);
        ProcessPagingScope paging_scope(m_process);
        return buffer.read(forward<Args>(args)...);
    }

    // Buffered variant of read_from_buffer().
    template<size_t BUFFER_BYTES, typename... Args>
    [[nodiscard]] bool read_from_buffer_buffered(const UserOrKernelBuffer& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...);
        ProcessPagingScope paging_scope(m_process);
        return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...);
    }

protected:
    AsyncDeviceRequest(Device&);

    // Thread-safe snapshot of the current request state.
    RequestResult get_request_result() const;

private:
    void sub_request_finished(AsyncDeviceRequest&);
    void request_finished();

    // Transition Pending -> Started and invoke start(). If the request
    // already reached a terminal state (presumably via cancellation —
    // TODO confirm once cancel() exists), this is a no-op.
    void do_start()
    {
        {
            ScopedSpinLock lock(m_lock);
            if (is_completed_result(m_result))
                return;
            m_result = Started;
        }
        start();
    }

    // True if the buffer can be accessed without switching paging context:
    // either it's a kernel buffer, or we're running in the originating process.
    bool in_target_context(const UserOrKernelBuffer& buffer) const
    {
        if (buffer.is_kernel_buffer())
            return true;
        return m_process == Process::current();
    }

    // Relies on the enum ordering: everything after Started is terminal.
    static bool is_completed_result(RequestResult result)
    {
        return result > Started;
    }

    Device& m_device;

    AsyncDeviceRequest* m_parent_request { nullptr }; // non-owning back-pointer; cleared in the parent's destructor
    RequestResult m_result { Pending };
    NonnullRefPtrVector<AsyncDeviceRequest> m_sub_requests_pending;
    NonnullRefPtrVector<AsyncDeviceRequest> m_sub_requests_complete;
    WaitQueue m_queue;                // waiters blocked in wait()
    NonnullRefPtr<Process> m_process; // process that created the request
    void* m_private { nullptr };      // driver-owned opaque pointer
    mutable SpinLock<u8> m_lock;      // guards m_result and the sub-request lists
};
174+
175+
}

Kernel/Devices/BXVGADevice.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,10 +48,9 @@ class BXVGADevice final : public BlockDevice {
4848
virtual const char* class_name() const override { return "BXVGA"; }
4949
virtual bool can_read(const FileDescription&, size_t) const override { return true; }
5050
virtual bool can_write(const FileDescription&, size_t) const override { return true; }
51+
virtual void start_request(AsyncBlockDeviceRequest& request) override { request.complete(AsyncDeviceRequest::Failure); }
5152
virtual KResultOr<size_t> read(FileDescription&, size_t, UserOrKernelBuffer&, size_t) override { return -EINVAL; }
5253
virtual KResultOr<size_t> write(FileDescription&, size_t, const UserOrKernelBuffer&, size_t) override { return -EINVAL; }
53-
virtual bool read_blocks(unsigned, u16, UserOrKernelBuffer&) override { return false; }
54-
virtual bool write_blocks(unsigned, u16, const UserOrKernelBuffer&) override { return false; }
5554

5655
void set_safe_resolution();
5756

0 commit comments

Comments
 (0)