@@ -58,6 +58,7 @@ void MemoryManager::initializePaging()
 
 auto MemoryManager::ensurePTE(LinearAddress linearAddress) -> PageTableEntry
 {
+    ASSERT_INTERRUPTS_DISABLED();
     dword pageDirectoryIndex = (linearAddress.get() >> 22) & 0x3ff;
     dword pageTableIndex = (linearAddress.get() >> 12) & 0x3ff;
 
@@ -84,6 +85,7 @@ auto MemoryManager::ensurePTE(LinearAddress linearAddress) -> PageTableEntry
 
 void MemoryManager::protectMap(LinearAddress linearAddress, size_t length)
 {
+    InterruptDisabler disabler;
     // FIXME: ASSERT(linearAddress is 4KB aligned);
     for (dword offset = 0; offset < length; offset += 4096) {
         auto pteAddress = linearAddress.offset(offset);
@@ -98,6 +100,7 @@ void MemoryManager::protectMap(LinearAddress linearAddress, size_t length)
 
 void MemoryManager::identityMap(LinearAddress linearAddress, size_t length)
 {
+    InterruptDisabler disabler;
     // FIXME: ASSERT(linearAddress is 4KB aligned);
     for (dword offset = 0; offset < length; offset += 4096) {
         auto pteAddress = linearAddress.offset(offset);
@@ -117,6 +120,7 @@ void MemoryManager::initialize()
 
 PageFaultResponse MemoryManager::handlePageFault(const PageFault& fault)
 {
+    ASSERT_INTERRUPTS_DISABLED();
     kprintf("MM: handlePageFault(%w) at laddr=%p\n", fault.code(), fault.address().get());
     if (fault.isNotPresent()) {
         kprintf("  >> NP fault!\n");
@@ -138,6 +142,7 @@ RetainPtr<Zone> MemoryManager::createZone(size_t size)
 
 Vector<PhysicalAddress> MemoryManager::allocatePhysicalPages(size_t count)
 {
+    InterruptDisabler disabler;
     if (count > m_freePages.size())
         return { };
 
@@ -150,6 +155,7 @@ Vector<PhysicalAddress> MemoryManager::allocatePhysicalPages(size_t count)
 
 byte* MemoryManager::quickMapOnePage(PhysicalAddress physicalAddress)
 {
+    ASSERT_INTERRUPTS_DISABLED();
     auto pte = ensurePTE(LinearAddress(4 * MB));
     kprintf("quickmap %x @ %x {pte @ %p}\n", physicalAddress.get(), 4 * MB, pte.ptr());
     pte.setPhysicalPageBase(physicalAddress.pageBase());
@@ -174,6 +180,7 @@ void MemoryManager::flushTLB(LinearAddress laddr)
 
 bool MemoryManager::unmapRegion(Task& task, Task::Region& region)
 {
+    InterruptDisabler disabler;
     auto& zone = *region.zone;
     for (size_t i = 0; i < zone.m_pages.size(); ++i) {
         auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
@@ -190,6 +197,7 @@ bool MemoryManager::unmapRegion(Task& task, Task::Region& region)
 
 bool MemoryManager::unmapRegionsForTask(Task& task)
 {
+    ASSERT_INTERRUPTS_DISABLED();
     for (auto& region : task.m_regions) {
         if (!unmapRegion(task, *region))
             return false;
@@ -199,6 +207,7 @@ bool MemoryManager::unmapRegionsForTask(Task& task)
 
 bool MemoryManager::mapRegion(Task& task, Task::Region& region)
 {
+    InterruptDisabler disabler;
     auto& zone = *region.zone;
     for (size_t i = 0; i < zone.m_pages.size(); ++i) {
         auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
@@ -215,6 +224,7 @@ bool MemoryManager::mapRegion(Task& task, Task::Region& region)
 
 bool MemoryManager::mapRegionsForTask(Task& task)
 {
+    ASSERT_INTERRUPTS_DISABLED();
     for (auto& region : task.m_regions) {
         if (!mapRegion(task, *region))
             return false;
@@ -229,6 +239,7 @@ bool copyToZone(Zone& zone, const void* data, size_t size)
         return false;
     }
 
+    InterruptDisabler disabler;
     auto* dataptr = (const byte*)data;
     size_t remaining = size;
     for (size_t i = 0; i < zone.m_pages.size(); ++i) {
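
Every function touched by this diff now either holds an InterruptDisabler for its whole scope or asserts on entry that interrupts are already off. Neither primitive is defined in the diff itself; the following is a minimal sketch, assuming an i386 target where IF is bit 9 (0x200) of EFLAGS, of how such a guard and assertion could look. The names mirror the diff, but the bodies are assumptions, not the kernel's actual headers.

// --- Illustrative sketch only, not part of this commit ---
#include <assert.h>

typedef unsigned int dword;

// Read EFLAGS so we can inspect the interrupt-enable flag (IF, bit 9 = 0x200).
static inline dword cpuFlags()
{
    dword flags;
    asm volatile("pushfl\n\t"
                 "popl %0"
                 : "=r"(flags));
    return flags;
}

#define ASSERT_INTERRUPTS_DISABLED() assert(!(cpuFlags() & 0x200))

// RAII guard: cli on construction, sti on destruction only if interrupts were
// enabled when the guard was created, so nested guards and early returns both
// restore the previous state correctly.
class InterruptDisabler {
public:
    InterruptDisabler()
        : m_savedFlags(cpuFlags())
    {
        asm volatile("cli" ::: "memory");
    }
    ~InterruptDisabler()
    {
        if (m_savedFlags & 0x200)
            asm volatile("sti" ::: "memory");
    }
private:
    dword m_savedFlags;
};

The RAII form matters for functions like unmapRegionsForTask(), which return early on failure: the destructor restores the previous interrupt state on every exit path, whereas ASSERT_INTERRUPTS_DISABLED() is used where the caller is already expected to hold such a guard.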