Skip to content

Commit ecdd9a5

Browse files
committed
Kernel: Reduce code duplication a little bit in Region allocation
This patch reduces the number of code paths that lead to the allocation of a Region object. It's quite hard to follow the various ways in which this can happen, so this is an effort to simplify.
1 parent 5e0c4d6 commit ecdd9a5

File tree

2 files changed

+21
-22
lines changed

2 files changed

+21
-22
lines changed

Kernel/VM/MemoryManager.cpp

Lines changed: 20 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -310,60 +310,58 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
310310

311311
OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit, bool cacheable)
312312
{
313-
InterruptDisabler disabler;
314313
ASSERT(!(size % PAGE_SIZE));
315314
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
316-
ASSERT(range.is_valid());
315+
if (!range.is_valid())
316+
return nullptr;
317317
auto vmobject = AnonymousVMObject::create_with_size(size);
318-
OwnPtr<Region> region;
319-
if (user_accessible)
320-
region = Region::create_user_accessible(range, vmobject, 0, name, access, cacheable);
321-
else
322-
region = Region::create_kernel_only(range, vmobject, 0, name, access, cacheable);
323-
region->map(kernel_page_directory());
318+
auto region = allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
319+
if (!region)
320+
return nullptr;
324321
if (should_commit)
325322
region->commit();
326323
return region;
327324
}
328325

329326
OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
330327
{
331-
InterruptDisabler disabler;
332328
ASSERT(!(size % PAGE_SIZE));
333329
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
334-
ASSERT(range.is_valid());
330+
if (!range.is_valid())
331+
return nullptr;
335332
auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
336333
if (!vmobject)
337334
return nullptr;
338-
OwnPtr<Region> region;
339-
if (user_accessible)
340-
region = Region::create_user_accessible(range, vmobject.release_nonnull(), 0, name, access, cacheable);
341-
else
342-
region = Region::create_kernel_only(range, vmobject.release_nonnull(), 0, name, access, cacheable);
343-
region->map(kernel_page_directory());
344-
return region;
335+
return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
345336
}
346337

347338
OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
348339
{
349340
return allocate_kernel_region(size, name, access, true, true, cacheable);
350341
}
351342

352-
OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
343+
OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, const StringView& name, u8 access, bool user_accessible, bool cacheable)
353344
{
354345
InterruptDisabler disabler;
355-
ASSERT(!(size % PAGE_SIZE));
356-
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
357-
ASSERT(range.is_valid());
358346
OwnPtr<Region> region;
359347
if (user_accessible)
360348
region = Region::create_user_accessible(range, vmobject, 0, name, access, cacheable);
361349
else
362350
region = Region::create_kernel_only(range, vmobject, 0, name, access, cacheable);
363-
region->map(kernel_page_directory());
351+
if (region)
352+
region->map(kernel_page_directory());
364353
return region;
365354
}
366355

356+
OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
357+
{
358+
ASSERT(!(size % PAGE_SIZE));
359+
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
360+
if (!range.is_valid())
361+
return nullptr;
362+
return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
363+
}
364+
367365
void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
368366
{
369367
for (auto& region : m_user_physical_regions) {

Kernel/VM/MemoryManager.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -106,6 +106,7 @@ class MemoryManager {
106106
OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
107107
OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
108108
OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
109+
OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
109110
OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = false);
110111

111112
unsigned user_physical_pages() const { return m_user_physical_pages; }

0 commit comments

Comments (0)