@@ -23,6 +23,7 @@
 #include <Kernel/Memory/SharedInodeVMObject.h>
 #include <Kernel/Multiboot.h>
 #include <Kernel/Panic.h>
+#include <Kernel/Prekernel/Prekernel.h>
 #include <Kernel/Process.h>
 #include <Kernel/Sections.h>
 #include <Kernel/StdLib.h>
@@ -74,7 +75,14 @@ bool MemoryManager::is_initialized()
     return s_the != nullptr;
 }
 
+static UNMAP_AFTER_INIT VirtualRange kernel_virtual_range()
+{
+    auto kernel_range_start = kernel_mapping_base + 2 * MiB; // The first 2 MiB are used for mapping the pre-kernel
+    return VirtualRange { VirtualAddress(kernel_range_start), KERNEL_PD_END - kernel_range_start };
+}
+
 UNMAP_AFTER_INIT MemoryManager::MemoryManager()
+    : m_region_tree(kernel_virtual_range())
 {
     s_the = this;
 
@@ -439,13 +447,20 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     // Create the bare page directory. This is not a fully constructed page directory and merely contains the allocators!
     m_kernel_page_directory = PageDirectory::must_create_kernel_page_directory();
 
+    {
+        // Carve out the whole page directory covering the kernel image to make MemoryManager::initialize_physical_pages() happy
+        FlatPtr start_of_range = ((FlatPtr)start_of_kernel_image & ~(FlatPtr)0x1fffff);
+        FlatPtr end_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
+        auto reserved_range = MUST(m_region_tree.try_allocate_specific(VirtualAddress(start_of_range), end_of_range - start_of_range));
+        (void)MUST(Region::create_unbacked(reserved_range)).leak_ptr();
+    }
+
     // Allocate a virtual address range for our array
-    auto range_or_error = m_kernel_page_directory->range_allocator().try_allocate_anywhere(physical_page_array_pages * PAGE_SIZE);
-    if (range_or_error.is_error()) {
-        dmesgln("MM: Could not allocate {} bytes to map physical page array!", physical_page_array_pages * PAGE_SIZE);
-        VERIFY_NOT_REACHED();
+    auto range = MUST(m_region_tree.try_allocate_anywhere(physical_page_array_pages * PAGE_SIZE));
+
+    {
+        (void)MUST(Region::create_unbacked(range)).leak_ptr();
     }
-    auto range = range_or_error.release_value();
 
     // Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
     // try to map the entire region into kernel space so we always have it
@@ -651,7 +666,7 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
         return nullptr;
 
     SpinlockLocker lock(s_mm_lock);
-    auto* region = MM.m_kernel_regions.find_largest_not_above(vaddr.get());
+    auto* region = MM.m_region_tree.regions().find_largest_not_above(vaddr.get());
     if (!region || !region->contains(vaddr))
         return nullptr;
     return region;
@@ -757,7 +772,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
     auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
-    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
+    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
     return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
 
@@ -796,7 +811,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size
     VERIFY(!(size % PAGE_SIZE));
     auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
+    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
     return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
 
@@ -805,7 +820,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAdd
     VERIFY(!(size % PAGE_SIZE));
     auto vmobject = TRY(AnonymousVMObject::try_create_for_physical_range(paddr, size));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
+    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
     return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
 }
 
@@ -823,7 +838,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobje
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
+    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
     return allocate_kernel_region_with_vmobject(range, vmobject, name, access, cacheable);
 }
 
@@ -1146,14 +1161,14 @@ void MemoryManager::register_kernel_region(Region& region)
 {
     VERIFY(region.is_kernel());
     SpinlockLocker lock(s_mm_lock);
-    m_kernel_regions.insert(region.vaddr().get(), region);
+    m_region_tree.regions().insert(region.vaddr().get(), region);
 }
 
 void MemoryManager::unregister_kernel_region(Region& region)
 {
     VERIFY(region.is_kernel());
     SpinlockLocker lock(s_mm_lock);
-    m_kernel_regions.remove(region.vaddr().get());
+    m_region_tree.regions().remove(region.vaddr().get());
 }
 
 void MemoryManager::dump_kernel_regions()
void MemoryManager::dump_kernel_regions ()
@@ -1167,7 +1182,7 @@ void MemoryManager::dump_kernel_regions()
1167
1182
dbgln (" BEGIN{} END{} SIZE{} ACCESS NAME" ,
1168
1183
addr_padding, addr_padding, addr_padding);
1169
1184
SpinlockLocker lock (s_mm_lock);
1170
- for (auto const & region : m_kernel_regions ) {
1185
+ for (auto const & region : m_region_tree. regions () ) {
1171
1186
dbgln (" {:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}" ,
1172
1187
region.vaddr ().get (),
1173
1188
region.vaddr ().offset (region.size () - 1 ).get (),