Kernel: Stop allocating physical pages for mapped MMIO regions
As MMIO is placed at fixed physical addresses and does not need to be
backed by real RAM physical pages, there is no need to use PhysicalPage
instances to track its pages.
This results in slightly fewer allocations, but more importantly it
makes MMIO addresses that lie above the normal RAM ranges work, which
is where 64-bit PCI BARs usually end up.
IdanHo committed May 10, 2024
1 parent 5f6495f commit 3ca74ef
Showing 21 changed files with 136 additions and 31 deletions.
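In call-site terms the change is mechanical; the sketch below summarizes it (the variable names, sizes, and error handling are illustrative rather than taken from any single call site):

// Before: MMIO physical ranges were mapped through the generic RAM-backed allocator.
auto old_style_region = TRY(MM.allocate_kernel_region(mmio_paddr, size, "Example Device"sv, Memory::Region::Access::ReadWrite));

// After: MMIO ranges go through a dedicated path with no PhysicalPage bookkeeping behind it.
auto mmio_region = TRY(MM.allocate_mmio_kernel_region(mmio_paddr, size, "Example Device"sv, Memory::Region::Access::ReadWrite));

// Callers that really do hold RAM pages now pass those pages explicitly instead of a physical address.
auto ram_region = TRY(MM.allocate_kernel_region_with_physical_pages({ &page, 1 }, "Example Buffer"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No));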
2 changes: 1 addition & 1 deletion Kernel/Arch/x86_64/Firmware/ACPI/StaticParsing.cpp
@@ -37,7 +37,7 @@ ErrorOr<Optional<PhysicalAddress>> find_rsdp_in_platform_specific_memory_locatio
auto region_size_or_error = Memory::page_round_up(memory_range.length);
if (region_size_or_error.is_error())
return IterationDecision::Continue;
auto region_or_error = MM.allocate_kernel_region(memory_range.start, region_size_or_error.value(), {}, Memory::Region::Access::Read);
auto region_or_error = MM.allocate_mmio_kernel_region(memory_range.start, region_size_or_error.value(), {}, Memory::Region::Access::Read);
if (region_or_error.is_error())
return IterationDecision::Continue;
mapping.region = region_or_error.release_value();
4 changes: 2 additions & 2 deletions Kernel/Arch/x86_64/Firmware/PCBIOS/Mapper.cpp
@@ -17,7 +17,7 @@ ErrorOr<Memory::MappedROM> map_bios()
mapping.size = 128 * KiB;
mapping.paddr = PhysicalAddress(0xe0000);
auto region_size = TRY(Memory::page_round_up(mapping.size));
mapping.region = TRY(MM.allocate_kernel_region(mapping.paddr, region_size, {}, Memory::Region::Access::Read));
mapping.region = TRY(MM.allocate_mmio_kernel_region(mapping.paddr, region_size, {}, Memory::Region::Access::Read));
return mapping;
}

@@ -31,7 +31,7 @@ ErrorOr<Memory::MappedROM> map_ebda()

Memory::MappedROM mapping;
auto region_size = TRY(Memory::page_round_up(ebda_size));
mapping.region = TRY(MM.allocate_kernel_region(ebda_paddr.page_base(), region_size, {}, Memory::Region::Access::Read));
mapping.region = TRY(MM.allocate_mmio_kernel_region(ebda_paddr.page_base(), region_size, {}, Memory::Region::Access::Read));
mapping.offset = ebda_paddr.offset_in_page();
mapping.size = ebda_size;
mapping.paddr = ebda_paddr;
2 changes: 1 addition & 1 deletion Kernel/Arch/x86_64/Interrupts/APIC.cpp
@@ -254,7 +254,7 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
set_base(apic_base);

if (!m_is_x2.was_set()) {
auto region_or_error = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite);
auto region_or_error = MM.allocate_mmio_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite);
if (region_or_error.is_error()) {
dbgln("APIC: Failed to allocate memory for APIC base");
return false;
2 changes: 1 addition & 1 deletion Kernel/Arch/x86_64/Time/HPET.cpp
@@ -417,7 +417,7 @@ u64 HPET::ns_to_raw_counter_ticks(u64 ns) const
UNMAP_AFTER_INIT HPET::HPET(PhysicalAddress acpi_hpet)
: m_physical_acpi_hpet_table(acpi_hpet)
, m_physical_acpi_hpet_registers(find_acpi_hpet_registers_block())
, m_hpet_mmio_region(MM.allocate_kernel_region(m_physical_acpi_hpet_registers.page_base(), PAGE_SIZE, "HPET MMIO"sv, Memory::Region::Access::ReadWrite).release_value())
, m_hpet_mmio_region(MM.allocate_mmio_kernel_region(m_physical_acpi_hpet_registers.page_base(), PAGE_SIZE, "HPET MMIO"sv, Memory::Region::Access::ReadWrite).release_value())
{
s_hpet = this; // Make available as soon as possible so that IRQs can use it

2 changes: 1 addition & 1 deletion Kernel/Bus/PCI/Access.cpp
@@ -79,7 +79,7 @@ UNMAP_AFTER_INIT bool Access::find_and_register_pci_host_bridges_from_acpi_mcfg_
dbgln("Failed to round up length of {} to pages", length);
return false;
}
auto mcfg_region_or_error = MM.allocate_kernel_region(mcfg_table.page_base(), region_size_or_error.value(), "PCI Parsing MCFG"sv, Memory::Region::Access::ReadWrite);
auto mcfg_region_or_error = MM.allocate_mmio_kernel_region(mcfg_table.page_base(), region_size_or_error.value(), "PCI Parsing MCFG"sv, Memory::Region::Access::ReadWrite);
if (mcfg_region_or_error.is_error())
return false;
auto& mcfg = *(ACPI::Structures::MCFG*)mcfg_region_or_error.value()->vaddr().offset(mcfg_table.offset_in_page()).as_ptr();
2 changes: 1 addition & 1 deletion Kernel/Bus/PCI/Controller/MemoryBackedHostBridge.cpp
@@ -69,7 +69,7 @@ void MemoryBackedHostBridge::map_bus_region(BusNumber bus)
if (m_mapped_bus == bus && m_mapped_bus_region)
return;
auto bus_base_address = determine_memory_mapped_bus_base_address(bus);
auto region_or_error = MM.allocate_kernel_region(bus_base_address, memory_range_per_bus, "PCI ECAM"sv, Memory::Region::Access::ReadWrite);
auto region_or_error = MM.allocate_mmio_kernel_region(bus_base_address, memory_range_per_bus, "PCI ECAM"sv, Memory::Region::Access::ReadWrite);
// FIXME: Find a way to propagate error from here.
if (region_or_error.is_error())
VERIFY_NOT_REACHED();
2 changes: 1 addition & 1 deletion Kernel/Bus/USB/EHCI/EHCIController.cpp
@@ -18,7 +18,7 @@ ErrorOr<NonnullLockRefPtr<EHCIController>> EHCIController::try_to_initialize(con
auto pci_bar_space_size = PCI::get_BAR_space_size(pci_device_identifier, SpaceBaseAddressRegister);

auto register_region_size = TRY(Memory::page_round_up(pci_bar_address.offset_in_page() + pci_bar_space_size));
auto register_region = TRY(MM.allocate_kernel_region(pci_bar_address.page_base(), register_region_size, {}, Memory::Region::Access::ReadWrite));
auto register_region = TRY(MM.allocate_mmio_kernel_region(pci_bar_address.page_base(), register_region_size, {}, Memory::Region::Access::ReadWrite));

VirtualAddress register_base_address = register_region->vaddr().offset(pci_bar_address.offset_in_page());

1 change: 1 addition & 0 deletions Kernel/CMakeLists.txt
@@ -242,6 +242,7 @@ set(KERNEL_SOURCES
Memory/AnonymousVMObject.cpp
Memory/InodeVMObject.cpp
Memory/MemoryManager.cpp
Memory/MMIOVMObject.cpp
Memory/PhysicalPage.cpp
Memory/PhysicalRegion.cpp
Memory/PhysicalZone.cpp
2 changes: 1 addition & 1 deletion Kernel/Devices/GPU/Console/BootFramebufferConsole.cpp
@@ -15,7 +15,7 @@ BootFramebufferConsole::BootFramebufferConsole(PhysicalAddress framebuffer_addr,
{
// NOTE: We're very early in the boot process, memory allocations shouldn't really fail
auto framebuffer_end = Memory::page_round_up(framebuffer_addr.offset(height * pitch).get()).release_value();
m_framebuffer = MM.allocate_kernel_region(framebuffer_addr.page_base(), framebuffer_end - framebuffer_addr.page_base().get(), "Boot Framebuffer"sv, Memory::Region::Access::ReadWrite).release_value();
m_framebuffer = MM.allocate_mmio_kernel_region(framebuffer_addr.page_base(), framebuffer_end - framebuffer_addr.page_base().get(), "Boot Framebuffer"sv, Memory::Region::Access::ReadWrite).release_value();

[[maybe_unused]] auto result = m_framebuffer->set_write_combine(true);
m_framebuffer_data = m_framebuffer->vaddr().offset(framebuffer_addr.offset_in_page()).as_ptr();
2 changes: 1 addition & 1 deletion Kernel/Devices/GPU/Console/ContiguousFramebufferConsole.cpp
@@ -29,7 +29,7 @@ void ContiguousFramebufferConsole::set_resolution(size_t width, size_t height, s

size_t size = Memory::page_round_up(pitch * height).release_value_but_fixme_should_propagate_errors();
dbgln("Framebuffer Console: taking {} bytes", size);
auto region_or_error = MM.allocate_kernel_region(m_framebuffer_address, size, "Framebuffer Console"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::Yes);
auto region_or_error = MM.allocate_mmio_kernel_region(m_framebuffer_address, size, "Framebuffer Console"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::Yes);
VERIFY(!region_or_error.is_error());
m_framebuffer_region = region_or_error.release_value();

2 changes: 1 addition & 1 deletion Kernel/Devices/GPU/Console/VGATextModeConsole.cpp
@@ -13,7 +13,7 @@ namespace Kernel::Graphics {
UNMAP_AFTER_INIT NonnullLockRefPtr<VGATextModeConsole> VGATextModeConsole::initialize()
{
auto vga_window_size = MUST(Memory::page_round_up(0xc0000 - 0xa0000));
auto vga_window_region = MUST(MM.allocate_kernel_region(PhysicalAddress(0xa0000), vga_window_size, "VGA Display"sv, Memory::Region::Access::ReadWrite));
auto vga_window_region = MUST(MM.allocate_mmio_kernel_region(PhysicalAddress(0xa0000), vga_window_size, "VGA Display"sv, Memory::Region::Access::ReadWrite));
return adopt_lock_ref(*new (nothrow) VGATextModeConsole(move(vga_window_region)));
}

2 changes: 1 addition & 1 deletion Kernel/Devices/GPU/DisplayConnector.cpp
@@ -80,7 +80,7 @@ ErrorOr<void> DisplayConnector::allocate_framebuffer_resources(size_t rounded_si
if (!m_framebuffer_at_arbitrary_physical_range) {
VERIFY(m_framebuffer_address.value().page_base() == m_framebuffer_address.value());
m_shared_framebuffer_vmobject = TRY(Memory::SharedFramebufferVMObject::try_create_for_physical_range(m_framebuffer_address.value(), rounded_size));
m_framebuffer_region = TRY(MM.allocate_kernel_region(m_framebuffer_address.value().page_base(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite));
m_framebuffer_region = TRY(MM.allocate_mmio_kernel_region(m_framebuffer_address.value().page_base(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite));
} else {
m_shared_framebuffer_vmobject = TRY(Memory::SharedFramebufferVMObject::try_create_at_arbitrary_physical_range(rounded_size));
m_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(m_shared_framebuffer_vmobject->real_writes_framebuffer_vmobject(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite));
2 changes: 1 addition & 1 deletion Kernel/Devices/GPU/Intel/DisplayConnectorGroup.cpp
@@ -21,7 +21,7 @@ namespace Kernel {

ErrorOr<NonnullLockRefPtr<IntelDisplayConnectorGroup>> IntelDisplayConnectorGroup::try_create(Badge<IntelNativeGraphicsAdapter>, IntelGraphics::Generation generation, MMIORegion const& first_region, MMIORegion const& second_region)
{
auto registers_region = TRY(MM.allocate_kernel_region(first_region.pci_bar_paddr, first_region.pci_bar_space_length, "Intel Native Graphics Registers"sv, Memory::Region::Access::ReadWrite));
auto registers_region = TRY(MM.allocate_mmio_kernel_region(first_region.pci_bar_paddr, first_region.pci_bar_space_length, "Intel Native Graphics Registers"sv, Memory::Region::Access::ReadWrite));
// NOTE: 0x5100 is the offset of the start of the GMBus registers
auto gmbus_connector = TRY(GMBusConnector::create_with_physical_address(first_region.pci_bar_paddr.offset(0x5100)));
auto connector_group = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) IntelDisplayConnectorGroup(generation, move(gmbus_connector), move(registers_region), first_region, second_region)));
4 changes: 2 additions & 2 deletions Kernel/Devices/Storage/ATA/AHCI/Port.cpp
@@ -518,7 +518,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64

dbgln_if(AHCI_DEBUG, "AHCI Port {}: CLE: ctba={:#08x}, ctbau={:#08x}, prdbc={:#08x}, prdtl={:#04x}, attributes={:#04x}", representative_port_index(), (u32)command_list_entries[unused_command_header.value()].ctba, (u32)command_list_entries[unused_command_header.value()].ctbau, (u32)command_list_entries[unused_command_header.value()].prdbc, (u16)command_list_entries[unused_command_header.value()].prdtl, (u16)command_list_entries[unused_command_header.value()].attributes);

auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
auto command_table_region = MM.allocate_kernel_region_with_physical_pages({ &m_command_table_pages[unused_command_header.value()], 1 }, "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();

dbgln_if(AHCI_DEBUG, "AHCI Port {}: Allocated command table at {}", representative_port_index(), command_table_region->vaddr());
@@ -606,7 +606,7 @@ bool AHCIPort::identify_device()
// QEMU doesn't care if we don't set the correct CFL field in this register, real hardware will set an handshake error bit in PxSERR register.
command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P;

auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite).release_value();
auto command_table_region = MM.allocate_kernel_region_with_physical_pages({ &m_command_table_pages[unused_command_header.value()], 1 }, "AHCI Command Table"sv, Memory::Region::Access::ReadWrite).release_value();
auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
memset(const_cast<u8*>(command_table.command_fis), 0, 64);
command_table.descriptors[0].base_high = 0;
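A note on the new call sites above: the { &m_command_table_pages[...], 1 } argument forms a one-element Span over a physical page the port already owns. A minimal sketch of the idiom, with hypothetical variable names:

// One already-allocated page, wrapped in a length-1 span for the new helper (names are illustrative).
NonnullRefPtr<Memory::PhysicalPage> page = m_command_table_pages[index];
auto table_region = TRY(MM.allocate_kernel_region_with_physical_pages(
    { &page, 1 },                     // Span<NonnullRefPtr<PhysicalPage>> of size 1
    "AHCI Command Table"sv,
    Memory::Region::Access::ReadWrite,
    Memory::Region::Cacheable::No));  // uncached, matching the first call site above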
31 changes: 31 additions & 0 deletions Kernel/Memory/MMIOVMObject.cpp
@@ -0,0 +1,31 @@
/*
* Copyright (c) 2024, Idan Horowitz <idan.horowitz@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/

#include <Kernel/Memory/MMIOVMObject.h>

namespace Kernel::Memory {

ErrorOr<NonnullLockRefPtr<MMIOVMObject>> MMIOVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
{
if (paddr.offset(size) < paddr) {
dbgln("Shenanigans! MMIOVMObject::try_create_for_physical_range({}, {}) would wrap around", paddr, size);
// Since we can't wrap around yet, let's pretend to OOM.
return ENOMEM;
}

// FIXME: We have to make this allocation because VMObject determines the size of the VMObject based on the physical pages array
auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));

return adopt_nonnull_lock_ref_or_enomem(new (nothrow) MMIOVMObject(paddr, move(new_physical_pages)));
}

MMIOVMObject::MMIOVMObject(PhysicalAddress paddr, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
: VMObject(move(new_physical_pages))
{
VERIFY(paddr.page_base() == paddr);
}

}
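To put a rough number on the FIXME above: try_create_physical_pages(size) still allocates one null RefPtr slot per page purely so VMObject can derive the region size, even though the MMIO mapping never uses those slots. An illustrative estimate (the 16 MiB figure is hypothetical):

// Illustrative only: bookkeeping kept alive for a hypothetical 16 MiB MMIO range on a 64-bit build.
constexpr size_t mmio_size = 16 * MiB;
constexpr size_t page_slots = mmio_size / PAGE_SIZE;                      // 4096 slots
constexpr size_t slot_bytes = page_slots * sizeof(RefPtr<PhysicalPage>);  // ~32 KiB of null RefPtrs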
26 changes: 26 additions & 0 deletions Kernel/Memory/MMIOVMObject.h
@@ -0,0 +1,26 @@
/*
* Copyright (c) 2024, Idan Horowitz <idan.horowitz@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/

#pragma once

#include <Kernel/Memory/PhysicalAddress.h>
#include <Kernel/Memory/VMObject.h>

namespace Kernel::Memory {

class MMIOVMObject final : public VMObject {
public:
static ErrorOr<NonnullLockRefPtr<MMIOVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);

virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return ENOTSUP; }

private:
MMIOVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalPage>>&&);

virtual StringView class_name() const override { return "MMIOVMObject"sv; }
};

}
28 changes: 21 additions & 7 deletions Kernel/Memory/MemoryManager.cpp
@@ -21,6 +21,7 @@
#include <Kernel/Library/Panic.h>
#include <Kernel/Library/StdLib.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/MMIOVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PhysicalRegion.h>
#include <Kernel/Memory/SharedInodeVMObject.h>
@@ -1069,9 +1070,10 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(

ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page)
{
dma_buffer_page = TRY(allocate_physical_page());
auto page = TRY(allocate_physical_page());
dma_buffer_page = page;
// Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behavior by default)
return allocate_kernel_region(dma_buffer_page->paddr(), PAGE_SIZE, name, access, Region::Cacheable::No);
return allocate_kernel_region_with_physical_pages({ &page, 1 }, name, access, Region::Cacheable::No);
}

ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access)
@@ -1086,7 +1088,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(
VERIFY(!(size % PAGE_SIZE));
dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
// Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behavior by default)
return allocate_kernel_region(dma_buffer_pages.first()->paddr(), size, name, access, Region::Cacheable::No);
return allocate_kernel_region_with_physical_pages(dma_buffer_pages, name, access, Region::Cacheable::No);
}

ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
@@ -1110,16 +1112,28 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size
return region;
}

ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> pages, StringView name, Region::Access access, Region::Cacheable cacheable)
{
auto vmobject = TRY(AnonymousVMObject::try_create_with_physical_pages(pages));
OwnPtr<KString> name_kstring;
if (!name.is_null())
name_kstring = TRY(KString::try_create(name));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, pages.size() * PAGE_SIZE, PAGE_SIZE); }));
TRY(region->map(kernel_page_directory()));
return region;
}

ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_mmio_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{
VERIFY(!(size % PAGE_SIZE));
auto vmobject = TRY(AnonymousVMObject::try_create_for_physical_range(paddr, size));
auto vmobject = TRY(MMIOVMObject::try_create_for_physical_range(paddr, size));
OwnPtr<KString> name_kstring;
if (!name.is_null())
name_kstring = TRY(KString::try_create(name));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE); }));
TRY(region->map(kernel_page_directory()));
TRY(region->map(kernel_page_directory(), paddr));
return region;
}

@@ -1329,7 +1343,7 @@ ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> MemoryManager::allocate_contiguous_
}));

{
auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0]->paddr(), PAGE_SIZE * page_count, {}, Region::Access::Read | Region::Access::Write));
auto cleanup_region = TRY(MM.allocate_kernel_region_with_physical_pages(physical_pages, {}, Region::Access::Read | Region::Access::Write));
memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count);
}
return physical_pages;
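The paddr now handed to region->map above is what makes high MMIO addresses usable: the page tables are pointed directly at the requested physical range instead of at PhysicalPage-tracked RAM. A hedged usage sketch for a 64-bit BAR sitting above all RAM (the address and size are made up):

// Hypothetical 64-bit prefetchable BAR at 24 GiB, well past the end of physical RAM.
auto bar_paddr = PhysicalAddress(0x6'0000'0000);
auto bar_size = TRY(Memory::page_round_up(16 * MiB));
auto bar_region = TRY(MM.allocate_mmio_kernel_region(bar_paddr, bar_size, "Example 64-bit BAR"sv, Memory::Region::Access::ReadWrite));
// The old RAM-backed path relied on PhysicalPage entries, which only exist for real RAM,
// so an address like this did not work; the MMIO path never consults that tracking at all.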
3 changes: 2 additions & 1 deletion Kernel/Memory/MemoryManager.h
@@ -174,7 +174,8 @@ class MemoryManager {
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalPage>>& dma_buffer_pages);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_mmio_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_unbacked_region_anywhere(size_t size, size_t alignment);
ErrorOr<NonnullOwnPtr<Region>> create_identity_mapped_region(PhysicalAddress, size_t);