Commit e8f543c

Kernel: Use intrusive RegionTree solution for kernel regions as well
This patch ports MemoryManager to RegionTree as well. The biggest difference between this and the userspace code is that kernel regions are owned by extant OwnPtr<Region> objects spread around the kernel, while userspace regions are owned by the AddressSpace itself.

For kernelspace, there are a couple of situations where we need to make large VM reservations that never get backed by regular VMObjects (for example the kernel image reservation, or the big kmalloc range.) Since we can't make a VM reservation without a Region object anymore, this patch adds a way to create unbacked Region objects that can be used for this exact purpose. They have no internal VMObject.
1 parent ffe2e77 commit e8f543c
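The new pattern, pieced together from the hunks below, looks roughly like this. It is a minimal sketch, not standalone code: MM, MUST, and MiB are kernel globals/macros, and the sizes are simply the ones the kmalloc call site below happens to use.

    // Reserve kernel virtual address space with no backing VMObject.
    // The returned OwnPtr<Region> itself represents the reservation.
    auto reservation = MUST(MM.region_tree().allocate_unbacked_anywhere(64 * MiB, 1 * MiB));

    // The Region carries the reserved range; callers parcel it out themselves.
    auto base = reservation->range().base();

    // Reservations that must live forever (kernel image, kmalloc range)
    // deliberately leak the Region so it stays registered in the tree.
    (void)reservation.leak_ptr();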

12 files changed: +71 -49 lines changed

Kernel/CMakeLists.txt

Lines changed: 0 additions & 1 deletion
@@ -177,7 +177,6 @@ set(KERNEL_SOURCES
     Memory/SharedInodeVMObject.cpp
     Memory/VMObject.cpp
     Memory/VirtualRange.cpp
-    Memory/VirtualRangeAllocator.cpp
     MiniStdLib.cpp
     Locking/LockRank.cpp
     Locking/Mutex.cpp

Kernel/Forward.h

Lines changed: 0 additions & 1 deletion
@@ -80,7 +80,6 @@ class Region;
 class SharedInodeVMObject;
 class VMObject;
 class VirtualRange;
-class VirtualRangeAllocator;
 }
 
 class Spinlock;

Kernel/Heap/kmalloc.cpp

Lines changed: 6 additions & 4 deletions
@@ -353,20 +353,22 @@ struct KmallocGlobalData {
     void enable_expansion()
     {
         // FIXME: This range can be much bigger on 64-bit, but we need to figure something out for 32-bit.
-        auto virtual_range = MM.kernel_page_directory().range_allocator().try_allocate_anywhere(64 * MiB, 1 * MiB);
+        auto reserved_region = MUST(MM.region_tree().allocate_unbacked_anywhere(64 * MiB, 1 * MiB));
 
         expansion_data = KmallocGlobalData::ExpansionData {
-            .virtual_range = virtual_range.value(),
-            .next_virtual_address = virtual_range.value().base(),
+            .virtual_range = reserved_region->range(),
+            .next_virtual_address = reserved_region->range().base(),
         };
 
         // Make sure the entire kmalloc VM range is backed by page tables.
         // This avoids having to deal with lazy page table allocation during heap expansion.
         SpinlockLocker mm_locker(Memory::s_mm_lock);
         SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
-        for (auto vaddr = virtual_range.value().base(); vaddr < virtual_range.value().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
+        for (auto vaddr = reserved_region->range().base(); vaddr < reserved_region->range().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
             MM.ensure_pte(MM.kernel_page_directory(), vaddr);
         }
+
+        (void)reserved_region.leak_ptr();
     }
 
     struct ExpansionData {
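Note the new (void)reserved_region.leak_ptr() at the end of enable_expansion(): nothing holds the OwnPtr<Region> once the function returns, so the Region is deliberately leaked to keep the kmalloc reservation registered for the lifetime of the kernel.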

Kernel/Memory/MemoryManager.cpp

Lines changed: 28 additions & 13 deletions
@@ -23,6 +23,7 @@
 #include <Kernel/Memory/SharedInodeVMObject.h>
 #include <Kernel/Multiboot.h>
 #include <Kernel/Panic.h>
+#include <Kernel/Prekernel/Prekernel.h>
 #include <Kernel/Process.h>
 #include <Kernel/Sections.h>
 #include <Kernel/StdLib.h>
@@ -74,7 +75,14 @@ bool MemoryManager::is_initialized()
     return s_the != nullptr;
 }
 
+static UNMAP_AFTER_INIT VirtualRange kernel_virtual_range()
+{
+    auto kernel_range_start = kernel_mapping_base + 2 * MiB; // The first 2 MiB are used for mapping the pre-kernel
+    return VirtualRange { VirtualAddress(kernel_range_start), KERNEL_PD_END - kernel_range_start };
+}
+
 UNMAP_AFTER_INIT MemoryManager::MemoryManager()
+    : m_region_tree(kernel_virtual_range())
 {
     s_the = this;
 
@@ -439,13 +447,20 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     // Create the bare page directory. This is not a fully constructed page directory and merely contains the allocators!
     m_kernel_page_directory = PageDirectory::must_create_kernel_page_directory();
 
+    {
+        // Carve out the whole page directory covering the kernel image to make MemoryManager::initialize_physical_pages() happy
+        FlatPtr start_of_range = ((FlatPtr)start_of_kernel_image & ~(FlatPtr)0x1fffff);
+        FlatPtr end_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
+        auto reserved_range = MUST(m_region_tree.try_allocate_specific(VirtualAddress(start_of_range), end_of_range - start_of_range));
+        (void)MUST(Region::create_unbacked(reserved_range)).leak_ptr();
+    }
+
     // Allocate a virtual address range for our array
-    auto range_or_error = m_kernel_page_directory->range_allocator().try_allocate_anywhere(physical_page_array_pages * PAGE_SIZE);
-    if (range_or_error.is_error()) {
-        dmesgln("MM: Could not allocate {} bytes to map physical page array!", physical_page_array_pages * PAGE_SIZE);
-        VERIFY_NOT_REACHED();
+    auto range = MUST(m_region_tree.try_allocate_anywhere(physical_page_array_pages * PAGE_SIZE));
+
+    {
+        (void)MUST(Region::create_unbacked(range)).leak_ptr();
     }
-    auto range = range_or_error.release_value();
 
     // Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
     // try to map the entire region into kernel space so we always have it
@@ -651,7 +666,7 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
         return nullptr;
 
     SpinlockLocker lock(s_mm_lock);
-    auto* region = MM.m_kernel_regions.find_largest_not_above(vaddr.get());
+    auto* region = MM.m_region_tree.regions().find_largest_not_above(vaddr.get());
     if (!region || !region->contains(vaddr))
         return nullptr;
     return region;
@@ -757,7 +772,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
     auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
-    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
+    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
     return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
 
@@ -796,7 +811,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size
     VERIFY(!(size % PAGE_SIZE));
     auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
+    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
     return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
 
@@ -805,7 +820,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAdd
     VERIFY(!(size % PAGE_SIZE));
     auto vmobject = TRY(AnonymousVMObject::try_create_for_physical_range(paddr, size));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
+    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
     return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
 
@@ -823,7 +838,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobje
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
+    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
     return allocate_kernel_region_with_vmobject(range, vmobject, name, access, cacheable);
 }
 
@@ -1146,14 +1161,14 @@ void MemoryManager::register_kernel_region(Region& region)
 {
     VERIFY(region.is_kernel());
     SpinlockLocker lock(s_mm_lock);
-    m_kernel_regions.insert(region.vaddr().get(), region);
+    m_region_tree.regions().insert(region.vaddr().get(), region);
 }
 
 void MemoryManager::unregister_kernel_region(Region& region)
 {
     VERIFY(region.is_kernel());
     SpinlockLocker lock(s_mm_lock);
-    m_kernel_regions.remove(region.vaddr().get());
+    m_region_tree.regions().remove(region.vaddr().get());
 }
 
 void MemoryManager::dump_kernel_regions()
@@ -1167,7 +1182,7 @@ void MemoryManager::dump_kernel_regions()
     dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
     SpinlockLocker lock(s_mm_lock);
-    for (auto const& region : m_kernel_regions) {
+    for (auto const& region : m_region_tree.regions()) {
         dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
             region.vaddr().get(),
             region.vaddr().offset(region.size() - 1).get(),
Kernel/Memory/MemoryManager.h

Lines changed: 4 additions & 1 deletion
@@ -18,6 +18,7 @@
 #include <Kernel/Memory/PhysicalPage.h>
 #include <Kernel/Memory/PhysicalRegion.h>
 #include <Kernel/Memory/Region.h>
+#include <Kernel/Memory/RegionTree.h>
 #include <Kernel/Memory/VMObject.h>
 
 namespace Kernel {
@@ -245,6 +246,8 @@ class MemoryManager {
 
     IterationDecision for_each_physical_memory_range(Function<IterationDecision(PhysicalMemoryRange const&)>);
 
+    auto& region_tree() { return m_region_tree; }
+
 private:
     MemoryManager();
     ~MemoryManager();
@@ -297,7 +300,7 @@ class MemoryManager {
     PhysicalPageEntry* m_physical_page_entries { nullptr };
     size_t m_physical_page_entries_count { 0 };
 
-    IntrusiveRedBlackTree<&Region::m_tree_node> m_kernel_regions;
+    RegionTree m_region_tree;
 
     Vector<UsedMemoryRange> m_used_memory_ranges;
     Vector<PhysicalMemoryRange> m_physical_memory_ranges;

Kernel/Memory/PageDirectory.cpp

Lines changed: 1 addition & 10 deletions
@@ -22,16 +22,7 @@ namespace Kernel::Memory {
 
 UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_page_directory()
 {
-    auto directory = adopt_ref_if_nonnull(new (nothrow) PageDirectory).release_nonnull();
-
-    auto kernel_range_start = kernel_mapping_base + 2 * MiB; // The first 2 MiB are used for mapping the pre-kernel
-    MUST(directory->m_range_allocator.initialize_with_range(VirtualAddress(kernel_range_start), KERNEL_PD_END - kernel_range_start));
-    // Carve out the whole page directory covering the kernel image to make MemoryManager::initialize_physical_pages() happy
-    FlatPtr start_of_range = ((FlatPtr)start_of_kernel_image & ~(FlatPtr)0x1fffff);
-    FlatPtr end_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
-    MUST(directory->m_range_allocator.try_allocate_specific(VirtualAddress(start_of_range), end_of_range - start_of_range));
-
-    return directory;
+    return adopt_ref_if_nonnull(new (nothrow) PageDirectory).release_nonnull();
 }
 
 ErrorOr<NonnullRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace()
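Both the range-allocator initialization and the kernel-image carve-out that used to live here have moved into MemoryManager: the kernel VM range now comes from kernel_virtual_range() in the MemoryManager constructor, and the carve-out happens in initialize_physical_pages() (see the MemoryManager.cpp hunks above).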

Kernel/Memory/PageDirectory.h

Lines changed: 0 additions & 5 deletions
@@ -13,7 +13,6 @@
 #include <AK/RefPtr.h>
 #include <Kernel/Forward.h>
 #include <Kernel/Memory/PhysicalPage.h>
-#include <Kernel/Memory/VirtualRangeAllocator.h>
 
 namespace Kernel::Memory {
 
@@ -47,9 +46,6 @@ class PageDirectory : public RefCounted<PageDirectory> {
 #endif
     }
 
-    VirtualRangeAllocator& range_allocator() { return m_range_allocator; }
-    VirtualRangeAllocator const& range_allocator() const { return m_range_allocator; }
-
     AddressSpace* address_space() { return m_space; }
     AddressSpace const* address_space() const { return m_space; }
 
@@ -66,7 +62,6 @@ class PageDirectory : public RefCounted<PageDirectory> {
     static void deregister_page_directory(PageDirectory* directory);
 
     AddressSpace* m_space { nullptr };
-    VirtualRangeAllocator m_range_allocator;
 #if ARCH(X86_64)
     RefPtr<PhysicalPage> m_pml4t;
 #endif

Kernel/Memory/Region.cpp

Lines changed: 15 additions & 10 deletions
@@ -22,6 +22,13 @@
 
 namespace Kernel::Memory {
 
+Region::Region(VirtualRange const& range)
+    : m_range(range)
+{
+    if (is_kernel())
+        MM.register_kernel_region(*this);
+}
+
 Region::Region(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
     : m_range(range)
     , m_offset_in_vmobject(offset_in_vmobject)
@@ -56,10 +63,7 @@ Region::~Region()
     if (m_page_directory) {
         SpinlockLocker pd_locker(m_page_directory->get_lock());
         if (!is_readable() && !is_writable() && !is_executable()) {
-            // If the region is "PROT_NONE", we didn't map it in the first place,
-            // so all we need to do here is deallocate the VM.
-            if (is_kernel())
-                m_page_directory->range_allocator().deallocate(range());
+            // If the region is "PROT_NONE", we didn't map it in the first place.
         } else {
             SpinlockLocker mm_locker(s_mm_lock);
             unmap_with_locks_held(ShouldDeallocateVirtualRange::Yes, ShouldFlushTLB::Yes, pd_locker, mm_locker);
@@ -68,6 +72,11 @@ Region::~Region()
     }
 }
 
+ErrorOr<NonnullOwnPtr<Region>> Region::create_unbacked(VirtualRange const& range)
+{
+    return adopt_nonnull_own_or_enomem(new (nothrow) Region(range));
+}
+
 ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()
 {
     VERIFY(Process::has_current());
@@ -84,7 +93,7 @@ ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()
         region_name = TRY(m_name->try_clone());
 
     auto region = TRY(Region::try_create_user_accessible(
-        m_range, m_vmobject, m_offset_in_vmobject, move(region_name), access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared));
+        m_range, vmobject(), m_offset_in_vmobject, move(region_name), access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared));
     region->set_mmap(m_mmap);
     region->set_shared(m_shared);
     region->set_syscall_region(is_syscall_region());
@@ -259,7 +268,7 @@ void Region::unmap(ShouldDeallocateVirtualRange should_deallocate_range, ShouldF
     unmap_with_locks_held(should_deallocate_range, should_flush_tlb, pd_locker, mm_locker);
 }
 
-void Region::unmap_with_locks_held(ShouldDeallocateVirtualRange deallocate_range, ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
+void Region::unmap_with_locks_held(ShouldDeallocateVirtualRange, ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
 {
     if (!m_page_directory)
         return;
@@ -270,10 +279,6 @@ void Region::unmap_with_locks_held(ShouldDeallocateVirtualRange deallocate_range
     }
     if (should_flush_tlb == ShouldFlushTLB::Yes)
         MemoryManager::flush_tlb(m_page_directory, vaddr(), page_count());
-    if (deallocate_range == ShouldDeallocateVirtualRange::Yes) {
-        if (is_kernel())
-            m_page_directory->range_allocator().deallocate(range());
-    }
     m_page_directory = nullptr;
 }
 
Kernel/Memory/Region.h

Lines changed: 4 additions & 2 deletions
@@ -14,7 +14,7 @@
 #include <Kernel/Forward.h>
 #include <Kernel/KString.h>
 #include <Kernel/Memory/PageFaultResponse.h>
-#include <Kernel/Memory/VirtualRangeAllocator.h>
+#include <Kernel/Memory/VirtualRange.h>
 #include <Kernel/Sections.h>
 #include <Kernel/UnixTypes.h>
 
@@ -56,6 +56,7 @@ class Region final
 
     static ErrorOr<NonnullOwnPtr<Region>> try_create_user_accessible(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
     static ErrorOr<NonnullOwnPtr<Region>> try_create_kernel_only(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
+    static ErrorOr<NonnullOwnPtr<Region>> create_unbacked(VirtualRange const&);
 
     ~Region();
 
@@ -198,6 +199,7 @@ class Region final
     void set_syscall_region(bool b) { m_syscall_region = b; }
 
 private:
+    explicit Region(VirtualRange const&);
     Region(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
 
     [[nodiscard]] bool remap_vmobject_page(size_t page_index, bool with_flush = true);
@@ -220,7 +222,7 @@ class Region final
     RefPtr<PageDirectory> m_page_directory;
     VirtualRange m_range;
     size_t m_offset_in_vmobject { 0 };
-    NonnullRefPtr<VMObject> m_vmobject;
+    RefPtr<VMObject> m_vmobject;
     OwnPtr<KString> m_name;
     u8 m_access { Region::None };
     bool m_shared : 1 { false };
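The member change from NonnullRefPtr<VMObject> to RefPtr<VMObject> is what makes unbacked regions possible: a Region may now have no VMObject at all. This is presumably also why try_clone() in Region.cpp above switched from passing the raw m_vmobject member to the vmobject() accessor, since the now-nullable member would no longer convert to the NonnullRefPtr<VMObject> parameter.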

Kernel/Memory/RegionTree.cpp

Lines changed: 8 additions & 0 deletions
@@ -140,4 +140,12 @@ ErrorOr<VirtualRange> RegionTree::try_allocate_randomized(size_t size, size_t al
 
     return try_allocate_anywhere(size, alignment);
 }
+
+ErrorOr<NonnullOwnPtr<Region>> RegionTree::allocate_unbacked_anywhere(size_t size, size_t alignment)
+{
+    SpinlockLocker locker(m_lock);
+    auto range = TRY(try_allocate_anywhere(size, alignment));
+    return Region::create_unbacked(range);
+}
+
 }
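allocate_unbacked_anywhere() takes the tree's spinlock, carves out a range, and wraps it in an unbacked Region, so the caller's OwnPtr<Region> owns the reservation. Presumably destroying that Region is what releases the range again, which would explain why the permanent reservations in this commit are leaked with leak_ptr() rather than kept in an owner that could go away.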
