From 3ca74ef928e87058d1d386621ec3141f762e091d Mon Sep 17 00:00:00 2001 From: Idan Horowitz Date: Fri, 10 May 2024 23:16:01 +0300 Subject: [PATCH] Kernel: Stop allocating physical pages for mapped MMIO regions As MMIO is placed at fixed physical addresses, and does not need to be backed by real RAM physical pages, there's no need to use PhysicalPage instances to track their pages. This results in slightly reduced allocations, but more importantly makes MMIO addresses which end up after the normal RAM ranges work, like 64-bit PCI BARs usually are. --- .../x86_64/Firmware/ACPI/StaticParsing.cpp | 2 +- Kernel/Arch/x86_64/Firmware/PCBIOS/Mapper.cpp | 4 +- Kernel/Arch/x86_64/Interrupts/APIC.cpp | 2 +- Kernel/Arch/x86_64/Time/HPET.cpp | 2 +- Kernel/Bus/PCI/Access.cpp | 2 +- .../PCI/Controller/MemoryBackedHostBridge.cpp | 2 +- Kernel/Bus/USB/EHCI/EHCIController.cpp | 2 +- Kernel/CMakeLists.txt | 1 + .../GPU/Console/BootFramebufferConsole.cpp | 2 +- .../Console/ContiguousFramebufferConsole.cpp | 2 +- .../GPU/Console/VGATextModeConsole.cpp | 2 +- Kernel/Devices/GPU/DisplayConnector.cpp | 2 +- .../GPU/Intel/DisplayConnectorGroup.cpp | 2 +- Kernel/Devices/Storage/ATA/AHCI/Port.cpp | 4 +- Kernel/Memory/MMIOVMObject.cpp | 31 ++++++++++++++ Kernel/Memory/MMIOVMObject.h | 26 ++++++++++++ Kernel/Memory/MemoryManager.cpp | 28 +++++++++---- Kernel/Memory/MemoryManager.h | 3 +- Kernel/Memory/Region.cpp | 41 ++++++++++++++++--- Kernel/Memory/Region.h | 3 ++ Kernel/Memory/TypedMapping.h | 4 +- 21 files changed, 136 insertions(+), 31 deletions(-) create mode 100644 Kernel/Memory/MMIOVMObject.cpp create mode 100644 Kernel/Memory/MMIOVMObject.h diff --git a/Kernel/Arch/x86_64/Firmware/ACPI/StaticParsing.cpp b/Kernel/Arch/x86_64/Firmware/ACPI/StaticParsing.cpp index 0f8d0aff727ea1..18e12b53c8889e 100644 --- a/Kernel/Arch/x86_64/Firmware/ACPI/StaticParsing.cpp +++ b/Kernel/Arch/x86_64/Firmware/ACPI/StaticParsing.cpp @@ -37,7 +37,7 @@ ErrorOr> find_rsdp_in_platform_specific_memory_locatio 
auto region_size_or_error = Memory::page_round_up(memory_range.length); if (region_size_or_error.is_error()) return IterationDecision::Continue; - auto region_or_error = MM.allocate_kernel_region(memory_range.start, region_size_or_error.value(), {}, Memory::Region::Access::Read); + auto region_or_error = MM.allocate_mmio_kernel_region(memory_range.start, region_size_or_error.value(), {}, Memory::Region::Access::Read); if (region_or_error.is_error()) return IterationDecision::Continue; mapping.region = region_or_error.release_value(); diff --git a/Kernel/Arch/x86_64/Firmware/PCBIOS/Mapper.cpp b/Kernel/Arch/x86_64/Firmware/PCBIOS/Mapper.cpp index de37889b3b6d16..acf2593425a4d2 100644 --- a/Kernel/Arch/x86_64/Firmware/PCBIOS/Mapper.cpp +++ b/Kernel/Arch/x86_64/Firmware/PCBIOS/Mapper.cpp @@ -17,7 +17,7 @@ ErrorOr map_bios() mapping.size = 128 * KiB; mapping.paddr = PhysicalAddress(0xe0000); auto region_size = TRY(Memory::page_round_up(mapping.size)); - mapping.region = TRY(MM.allocate_kernel_region(mapping.paddr, region_size, {}, Memory::Region::Access::Read)); + mapping.region = TRY(MM.allocate_mmio_kernel_region(mapping.paddr, region_size, {}, Memory::Region::Access::Read)); return mapping; } @@ -31,7 +31,7 @@ ErrorOr map_ebda() Memory::MappedROM mapping; auto region_size = TRY(Memory::page_round_up(ebda_size)); - mapping.region = TRY(MM.allocate_kernel_region(ebda_paddr.page_base(), region_size, {}, Memory::Region::Access::Read)); + mapping.region = TRY(MM.allocate_mmio_kernel_region(ebda_paddr.page_base(), region_size, {}, Memory::Region::Access::Read)); mapping.offset = ebda_paddr.offset_in_page(); mapping.size = ebda_size; mapping.paddr = ebda_paddr; diff --git a/Kernel/Arch/x86_64/Interrupts/APIC.cpp b/Kernel/Arch/x86_64/Interrupts/APIC.cpp index 3d6682db3192de..7edb653516f6d0 100644 --- a/Kernel/Arch/x86_64/Interrupts/APIC.cpp +++ b/Kernel/Arch/x86_64/Interrupts/APIC.cpp @@ -254,7 +254,7 @@ UNMAP_AFTER_INIT bool APIC::init_bsp() set_base(apic_base); if 
(!m_is_x2.was_set()) { - auto region_or_error = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite); + auto region_or_error = MM.allocate_mmio_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite); if (region_or_error.is_error()) { dbgln("APIC: Failed to allocate memory for APIC base"); return false; diff --git a/Kernel/Arch/x86_64/Time/HPET.cpp b/Kernel/Arch/x86_64/Time/HPET.cpp index f12dd08a43df14..43353eee1d7470 100644 --- a/Kernel/Arch/x86_64/Time/HPET.cpp +++ b/Kernel/Arch/x86_64/Time/HPET.cpp @@ -417,7 +417,7 @@ u64 HPET::ns_to_raw_counter_ticks(u64 ns) const UNMAP_AFTER_INIT HPET::HPET(PhysicalAddress acpi_hpet) : m_physical_acpi_hpet_table(acpi_hpet) , m_physical_acpi_hpet_registers(find_acpi_hpet_registers_block()) - , m_hpet_mmio_region(MM.allocate_kernel_region(m_physical_acpi_hpet_registers.page_base(), PAGE_SIZE, "HPET MMIO"sv, Memory::Region::Access::ReadWrite).release_value()) + , m_hpet_mmio_region(MM.allocate_mmio_kernel_region(m_physical_acpi_hpet_registers.page_base(), PAGE_SIZE, "HPET MMIO"sv, Memory::Region::Access::ReadWrite).release_value()) { s_hpet = this; // Make available as soon as possible so that IRQs can use it diff --git a/Kernel/Bus/PCI/Access.cpp b/Kernel/Bus/PCI/Access.cpp index a911eaac99bb03..0fd5f4eed51e38 100644 --- a/Kernel/Bus/PCI/Access.cpp +++ b/Kernel/Bus/PCI/Access.cpp @@ -79,7 +79,7 @@ UNMAP_AFTER_INIT bool Access::find_and_register_pci_host_bridges_from_acpi_mcfg_ dbgln("Failed to round up length of {} to pages", length); return false; } - auto mcfg_region_or_error = MM.allocate_kernel_region(mcfg_table.page_base(), region_size_or_error.value(), "PCI Parsing MCFG"sv, Memory::Region::Access::ReadWrite); + auto mcfg_region_or_error = MM.allocate_mmio_kernel_region(mcfg_table.page_base(), region_size_or_error.value(), "PCI Parsing MCFG"sv, Memory::Region::Access::ReadWrite); if (mcfg_region_or_error.is_error()) return false; auto& mcfg = 
*(ACPI::Structures::MCFG*)mcfg_region_or_error.value()->vaddr().offset(mcfg_table.offset_in_page()).as_ptr(); diff --git a/Kernel/Bus/PCI/Controller/MemoryBackedHostBridge.cpp b/Kernel/Bus/PCI/Controller/MemoryBackedHostBridge.cpp index a6c6c6bb88ea16..a9079d0402868b 100644 --- a/Kernel/Bus/PCI/Controller/MemoryBackedHostBridge.cpp +++ b/Kernel/Bus/PCI/Controller/MemoryBackedHostBridge.cpp @@ -69,7 +69,7 @@ void MemoryBackedHostBridge::map_bus_region(BusNumber bus) if (m_mapped_bus == bus && m_mapped_bus_region) return; auto bus_base_address = determine_memory_mapped_bus_base_address(bus); - auto region_or_error = MM.allocate_kernel_region(bus_base_address, memory_range_per_bus, "PCI ECAM"sv, Memory::Region::Access::ReadWrite); + auto region_or_error = MM.allocate_mmio_kernel_region(bus_base_address, memory_range_per_bus, "PCI ECAM"sv, Memory::Region::Access::ReadWrite); // FIXME: Find a way to propagate error from here. if (region_or_error.is_error()) VERIFY_NOT_REACHED(); diff --git a/Kernel/Bus/USB/EHCI/EHCIController.cpp b/Kernel/Bus/USB/EHCI/EHCIController.cpp index be1c255a35dbdf..484e1bf1bb6c5f 100644 --- a/Kernel/Bus/USB/EHCI/EHCIController.cpp +++ b/Kernel/Bus/USB/EHCI/EHCIController.cpp @@ -18,7 +18,7 @@ ErrorOr> EHCIController::try_to_initialize(con auto pci_bar_space_size = PCI::get_BAR_space_size(pci_device_identifier, SpaceBaseAddressRegister); auto register_region_size = TRY(Memory::page_round_up(pci_bar_address.offset_in_page() + pci_bar_space_size)); - auto register_region = TRY(MM.allocate_kernel_region(pci_bar_address.page_base(), register_region_size, {}, Memory::Region::Access::ReadWrite)); + auto register_region = TRY(MM.allocate_mmio_kernel_region(pci_bar_address.page_base(), register_region_size, {}, Memory::Region::Access::ReadWrite)); VirtualAddress register_base_address = register_region->vaddr().offset(pci_bar_address.offset_in_page()); diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt index 4772925f7491e6..bed016b33bdd1e 100644 
--- a/Kernel/CMakeLists.txt +++ b/Kernel/CMakeLists.txt @@ -242,6 +242,7 @@ set(KERNEL_SOURCES Memory/AnonymousVMObject.cpp Memory/InodeVMObject.cpp Memory/MemoryManager.cpp + Memory/MMIOVMObject.cpp Memory/PhysicalPage.cpp Memory/PhysicalRegion.cpp Memory/PhysicalZone.cpp diff --git a/Kernel/Devices/GPU/Console/BootFramebufferConsole.cpp b/Kernel/Devices/GPU/Console/BootFramebufferConsole.cpp index 6b090847f558c2..8a77ec04b64ea7 100644 --- a/Kernel/Devices/GPU/Console/BootFramebufferConsole.cpp +++ b/Kernel/Devices/GPU/Console/BootFramebufferConsole.cpp @@ -15,7 +15,7 @@ BootFramebufferConsole::BootFramebufferConsole(PhysicalAddress framebuffer_addr, { // NOTE: We're very early in the boot process, memory allocations shouldn't really fail auto framebuffer_end = Memory::page_round_up(framebuffer_addr.offset(height * pitch).get()).release_value(); - m_framebuffer = MM.allocate_kernel_region(framebuffer_addr.page_base(), framebuffer_end - framebuffer_addr.page_base().get(), "Boot Framebuffer"sv, Memory::Region::Access::ReadWrite).release_value(); + m_framebuffer = MM.allocate_mmio_kernel_region(framebuffer_addr.page_base(), framebuffer_end - framebuffer_addr.page_base().get(), "Boot Framebuffer"sv, Memory::Region::Access::ReadWrite).release_value(); [[maybe_unused]] auto result = m_framebuffer->set_write_combine(true); m_framebuffer_data = m_framebuffer->vaddr().offset(framebuffer_addr.offset_in_page()).as_ptr(); diff --git a/Kernel/Devices/GPU/Console/ContiguousFramebufferConsole.cpp b/Kernel/Devices/GPU/Console/ContiguousFramebufferConsole.cpp index 093ba5b4887d7e..eecc63160ee5ca 100644 --- a/Kernel/Devices/GPU/Console/ContiguousFramebufferConsole.cpp +++ b/Kernel/Devices/GPU/Console/ContiguousFramebufferConsole.cpp @@ -29,7 +29,7 @@ void ContiguousFramebufferConsole::set_resolution(size_t width, size_t height, s size_t size = Memory::page_round_up(pitch * height).release_value_but_fixme_should_propagate_errors(); dbgln("Framebuffer Console: taking {} bytes", 
size); - auto region_or_error = MM.allocate_kernel_region(m_framebuffer_address, size, "Framebuffer Console"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::Yes); + auto region_or_error = MM.allocate_mmio_kernel_region(m_framebuffer_address, size, "Framebuffer Console"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::Yes); VERIFY(!region_or_error.is_error()); m_framebuffer_region = region_or_error.release_value(); diff --git a/Kernel/Devices/GPU/Console/VGATextModeConsole.cpp b/Kernel/Devices/GPU/Console/VGATextModeConsole.cpp index 4d164bb125dc08..5249c0412b456b 100644 --- a/Kernel/Devices/GPU/Console/VGATextModeConsole.cpp +++ b/Kernel/Devices/GPU/Console/VGATextModeConsole.cpp @@ -13,7 +13,7 @@ namespace Kernel::Graphics { UNMAP_AFTER_INIT NonnullLockRefPtr VGATextModeConsole::initialize() { auto vga_window_size = MUST(Memory::page_round_up(0xc0000 - 0xa0000)); - auto vga_window_region = MUST(MM.allocate_kernel_region(PhysicalAddress(0xa0000), vga_window_size, "VGA Display"sv, Memory::Region::Access::ReadWrite)); + auto vga_window_region = MUST(MM.allocate_mmio_kernel_region(PhysicalAddress(0xa0000), vga_window_size, "VGA Display"sv, Memory::Region::Access::ReadWrite)); return adopt_lock_ref(*new (nothrow) VGATextModeConsole(move(vga_window_region))); } diff --git a/Kernel/Devices/GPU/DisplayConnector.cpp b/Kernel/Devices/GPU/DisplayConnector.cpp index b6e6c246aa2c7a..4da885db4416d6 100644 --- a/Kernel/Devices/GPU/DisplayConnector.cpp +++ b/Kernel/Devices/GPU/DisplayConnector.cpp @@ -80,7 +80,7 @@ ErrorOr DisplayConnector::allocate_framebuffer_resources(size_t rounded_si if (!m_framebuffer_at_arbitrary_physical_range) { VERIFY(m_framebuffer_address.value().page_base() == m_framebuffer_address.value()); m_shared_framebuffer_vmobject = TRY(Memory::SharedFramebufferVMObject::try_create_for_physical_range(m_framebuffer_address.value(), rounded_size)); - m_framebuffer_region = 
TRY(MM.allocate_kernel_region(m_framebuffer_address.value().page_base(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite)); + m_framebuffer_region = TRY(MM.allocate_mmio_kernel_region(m_framebuffer_address.value().page_base(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite)); } else { m_shared_framebuffer_vmobject = TRY(Memory::SharedFramebufferVMObject::try_create_at_arbitrary_physical_range(rounded_size)); m_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(m_shared_framebuffer_vmobject->real_writes_framebuffer_vmobject(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite)); diff --git a/Kernel/Devices/GPU/Intel/DisplayConnectorGroup.cpp b/Kernel/Devices/GPU/Intel/DisplayConnectorGroup.cpp index 76046713af5946..3d68e0889b80c1 100644 --- a/Kernel/Devices/GPU/Intel/DisplayConnectorGroup.cpp +++ b/Kernel/Devices/GPU/Intel/DisplayConnectorGroup.cpp @@ -21,7 +21,7 @@ namespace Kernel { ErrorOr> IntelDisplayConnectorGroup::try_create(Badge, IntelGraphics::Generation generation, MMIORegion const& first_region, MMIORegion const& second_region) { - auto registers_region = TRY(MM.allocate_kernel_region(first_region.pci_bar_paddr, first_region.pci_bar_space_length, "Intel Native Graphics Registers"sv, Memory::Region::Access::ReadWrite)); + auto registers_region = TRY(MM.allocate_mmio_kernel_region(first_region.pci_bar_paddr, first_region.pci_bar_space_length, "Intel Native Graphics Registers"sv, Memory::Region::Access::ReadWrite)); // NOTE: 0x5100 is the offset of the start of the GMBus registers auto gmbus_connector = TRY(GMBusConnector::create_with_physical_address(first_region.pci_bar_paddr.offset(0x5100))); auto connector_group = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) IntelDisplayConnectorGroup(generation, move(gmbus_connector), move(registers_region), first_region, second_region))); diff --git a/Kernel/Devices/Storage/ATA/AHCI/Port.cpp b/Kernel/Devices/Storage/ATA/AHCI/Port.cpp index 
10e4120f5a5b50..10748b6eaa726a 100644 --- a/Kernel/Devices/Storage/ATA/AHCI/Port.cpp +++ b/Kernel/Devices/Storage/ATA/AHCI/Port.cpp @@ -518,7 +518,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64 dbgln_if(AHCI_DEBUG, "AHCI Port {}: CLE: ctba={:#08x}, ctbau={:#08x}, prdbc={:#08x}, prdtl={:#04x}, attributes={:#04x}", representative_port_index(), (u32)command_list_entries[unused_command_header.value()].ctba, (u32)command_list_entries[unused_command_header.value()].ctbau, (u32)command_list_entries[unused_command_header.value()].prdbc, (u16)command_list_entries[unused_command_header.value()].prdtl, (u16)command_list_entries[unused_command_header.value()].attributes); - auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value(); + auto command_table_region = MM.allocate_kernel_region_with_physical_pages({ &m_command_table_pages[unused_command_header.value()], 1 }, "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value(); auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr(); dbgln_if(AHCI_DEBUG, "AHCI Port {}: Allocated command table at {}", representative_port_index(), command_table_region->vaddr()); @@ -606,7 +606,7 @@ bool AHCIPort::identify_device() // QEMU doesn't care if we don't set the correct CFL field in this register, real hardware will set an handshake error bit in PxSERR register. 
command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P; - auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite).release_value(); + auto command_table_region = MM.allocate_kernel_region_with_physical_pages({ &m_command_table_pages[unused_command_header.value()], 1 }, "AHCI Command Table"sv, Memory::Region::Access::ReadWrite).release_value(); auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr(); memset(const_cast(command_table.command_fis), 0, 64); command_table.descriptors[0].base_high = 0; diff --git a/Kernel/Memory/MMIOVMObject.cpp b/Kernel/Memory/MMIOVMObject.cpp new file mode 100644 index 00000000000000..d427325eef8436 --- /dev/null +++ b/Kernel/Memory/MMIOVMObject.cpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2024, Idan Horowitz + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#include + +namespace Kernel::Memory { + +ErrorOr> MMIOVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size) +{ + if (paddr.offset(size) < paddr) { + dbgln("Shenanigans! MMIOVMObject::try_create_for_physical_range({}, {}) would wrap around", paddr, size); + // Since we can't wrap around yet, let's pretend to OOM. 
+ return ENOMEM; + } + + // FIXME: We have to make this allocation because VMObject determines the size of the VMObject based on the physical pages array + auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size)); + + return adopt_nonnull_lock_ref_or_enomem(new (nothrow) MMIOVMObject(paddr, move(new_physical_pages))); +} + +MMIOVMObject::MMIOVMObject(PhysicalAddress paddr, FixedArray>&& new_physical_pages) + : VMObject(move(new_physical_pages)) +{ + VERIFY(paddr.page_base() == paddr); +} + +} diff --git a/Kernel/Memory/MMIOVMObject.h b/Kernel/Memory/MMIOVMObject.h new file mode 100644 index 00000000000000..f78892ed27da46 --- /dev/null +++ b/Kernel/Memory/MMIOVMObject.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2024, Idan Horowitz + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#pragma once + +#include +#include + +namespace Kernel::Memory { + +class MMIOVMObject final : public VMObject { +public: + static ErrorOr> try_create_for_physical_range(PhysicalAddress paddr, size_t size); + + virtual ErrorOr> try_clone() override { return ENOTSUP; } + +private: + MMIOVMObject(PhysicalAddress, FixedArray>&&); + + virtual StringView class_name() const override { return "MMIOVMObject"sv; } +}; + +} diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp index 9213c29883fd98..8a9c0756754e4e 100644 --- a/Kernel/Memory/MemoryManager.cpp +++ b/Kernel/Memory/MemoryManager.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -1069,9 +1070,10 @@ ErrorOr> MemoryManager::allocate_contiguous_kernel_region( ErrorOr> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr& dma_buffer_page) { - dma_buffer_page = TRY(allocate_physical_page()); + auto page = TRY(allocate_physical_page()); + dma_buffer_page = page; // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behavior by default) - return 
allocate_kernel_region(dma_buffer_page->paddr(), PAGE_SIZE, name, access, Region::Cacheable::No); + return allocate_kernel_region_with_physical_pages({ &page, 1 }, name, access, Region::Cacheable::No); } ErrorOr> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access) @@ -1086,7 +1088,7 @@ ErrorOr> MemoryManager::allocate_dma_buffer_pages( VERIFY(!(size % PAGE_SIZE)); dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size)); // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behavior by default) - return allocate_kernel_region(dma_buffer_pages.first()->paddr(), size, name, access, Region::Cacheable::No); + return allocate_kernel_region_with_physical_pages(dma_buffer_pages, name, access, Region::Cacheable::No); } ErrorOr> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access) @@ -1110,16 +1112,28 @@ ErrorOr> MemoryManager::allocate_kernel_region(size_t size return region; } -ErrorOr> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable) +ErrorOr> MemoryManager::allocate_kernel_region_with_physical_pages(Span> pages, StringView name, Region::Access access, Region::Cacheable cacheable) +{ + auto vmobject = TRY(AnonymousVMObject::try_create_with_physical_pages(pages)); + OwnPtr name_kstring; + if (!name.is_null()) + name_kstring = TRY(KString::try_create(name)); + auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable)); + TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, pages.size() * PAGE_SIZE, PAGE_SIZE); })); + TRY(region->map(kernel_page_directory())); + return region; +} + +ErrorOr> MemoryManager::allocate_mmio_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, 
Region::Cacheable cacheable) { VERIFY(!(size % PAGE_SIZE)); - auto vmobject = TRY(AnonymousVMObject::try_create_for_physical_range(paddr, size)); + auto vmobject = TRY(MMIOVMObject::try_create_for_physical_range(paddr, size)); OwnPtr name_kstring; if (!name.is_null()) name_kstring = TRY(KString::try_create(name)); auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable)); TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE); })); - TRY(region->map(kernel_page_directory())); + TRY(region->map(kernel_page_directory(), paddr)); return region; } @@ -1329,7 +1343,7 @@ ErrorOr>> MemoryManager::allocate_contiguous_ })); { - auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0]->paddr(), PAGE_SIZE * page_count, {}, Region::Access::Read | Region::Access::Write)); + auto cleanup_region = TRY(MM.allocate_kernel_region_with_physical_pages(physical_pages, {}, Region::Access::Read | Region::Access::Write)); memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count); } return physical_pages; diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h index edf4f185c9f3ec..ecc8db89ce3830 100644 --- a/Kernel/Memory/MemoryManager.h +++ b/Kernel/Memory/MemoryManager.h @@ -174,7 +174,8 @@ class MemoryManager { ErrorOr> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector>& dma_buffer_pages); ErrorOr> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access); ErrorOr> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes); - ErrorOr> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes); + ErrorOr> allocate_mmio_kernel_region(PhysicalAddress, 
size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes); + ErrorOr> allocate_kernel_region_with_physical_pages(Span>, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes); ErrorOr> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes); ErrorOr> allocate_unbacked_region_anywhere(size_t size, size_t alignment); ErrorOr> create_identity_mapped_region(PhysicalAddress, size_t); diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp index 9a081b901de78c..59df192548a72c 100644 --- a/Kernel/Memory/Region.cpp +++ b/Kernel/Memory/Region.cpp @@ -211,6 +211,18 @@ ErrorOr Region::set_should_cow(size_t page_index, bool cow) } bool Region::map_individual_page_impl(size_t page_index, RefPtr page) +{ + if (!page) + return map_individual_page_impl(page_index, {}, false, false); + return map_individual_page_impl(page_index, page->paddr(), is_readable(), is_writable() && !page->is_shared_zero_page() && !page->is_lazy_committed_page()); +} + +bool Region::map_individual_page_impl(size_t page_index, PhysicalAddress paddr) +{ + return map_individual_page_impl(page_index, paddr, is_readable(), is_writable()); +} + +bool Region::map_individual_page_impl(size_t page_index, PhysicalAddress paddr, bool readable, bool writeable) { VERIFY(m_page_directory->get_lock().is_locked_by_current_processor()); @@ -225,18 +237,15 @@ bool Region::map_individual_page_impl(size_t page_index, RefPtr pa if (!pte) return false; - if (!page || (!is_readable() && !is_writable())) { + if (!readable && !writeable) { pte->clear(); return true; } pte->set_cache_disabled(!m_cacheable); - pte->set_physical_page_base(page->paddr().get()); + pte->set_physical_page_base(paddr.get()); pte->set_present(true); - if (page->is_shared_zero_page() || page->is_lazy_committed_page() || should_cow(page_index)) - pte->set_writable(false); - else - 
pte->set_writable(is_writable()); + pte->set_writable(writeable && !should_cow(page_index)); if (Processor::current().has_nx()) pte->set_execute_disabled(!is_executable()); if (Processor::current().has_pat()) @@ -323,6 +332,26 @@ ErrorOr Region::map(PageDirectory& page_directory, ShouldFlushTLB should_f return ENOMEM; } +ErrorOr Region::map(PageDirectory& page_directory, PhysicalAddress paddr, ShouldFlushTLB should_flush_tlb) +{ + SpinlockLocker page_lock(page_directory.get_lock()); + set_page_directory(page_directory); + size_t page_index = 0; + while (page_index < page_count()) { + if (!map_individual_page_impl(page_index, paddr)) + break; + ++page_index; + paddr = paddr.offset(PAGE_SIZE); + } + if (page_index > 0) { + if (should_flush_tlb == ShouldFlushTLB::Yes) + MemoryManager::flush_tlb(m_page_directory, vaddr(), page_index); + if (page_index == page_count()) + return {}; + } + return ENOMEM; +} + void Region::remap() { VERIFY(m_page_directory); diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h index f2807a4cbe6a5b..71dcdb07d8fbd3 100644 --- a/Kernel/Memory/Region.h +++ b/Kernel/Memory/Region.h @@ -196,6 +196,7 @@ class Region final void set_page_directory(PageDirectory&); ErrorOr map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes); + ErrorOr map(PageDirectory&, PhysicalAddress, ShouldFlushTLB = ShouldFlushTLB::Yes); void unmap(ShouldFlushTLB = ShouldFlushTLB::Yes); void unmap_with_locks_held(ShouldFlushTLB, SpinlockLocker>& pd_locker); @@ -235,6 +236,8 @@ class Region final [[nodiscard]] bool map_individual_page_impl(size_t page_index); [[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr); + [[nodiscard]] bool map_individual_page_impl(size_t page_index, PhysicalAddress); + [[nodiscard]] bool map_individual_page_impl(size_t page_index, PhysicalAddress, bool readable, bool writeable); LockRefPtr m_page_directory; VirtualRange m_range; diff --git a/Kernel/Memory/TypedMapping.h b/Kernel/Memory/TypedMapping.h index 
ab2c82d801a2ac..2a18cd41c268a3 100644 --- a/Kernel/Memory/TypedMapping.h +++ b/Kernel/Memory/TypedMapping.h @@ -32,7 +32,7 @@ template static ErrorOr>> adopt_new_nonnull_own_typed_mapping(PhysicalAddress paddr, size_t length, Region::Access access = Region::Access::Read) { auto mapping_length = TRY(page_round_up(paddr.offset_in_page() + length)); - auto region = TRY(MM.allocate_kernel_region(paddr.page_base(), mapping_length, {}, access)); + auto region = TRY(MM.allocate_mmio_kernel_region(paddr.page_base(), mapping_length, {}, access)); auto table = TRY(adopt_nonnull_own_or_enomem(new (nothrow) Memory::TypedMapping())); table->region = move(region); table->offset = paddr.offset_in_page(); @@ -46,7 +46,7 @@ static ErrorOr> map_typed(PhysicalAddress paddr, size_t length, { TypedMapping table; auto mapping_length = TRY(page_round_up(paddr.offset_in_page() + length)); - table.region = TRY(MM.allocate_kernel_region(paddr.page_base(), mapping_length, {}, access)); + table.region = TRY(MM.allocate_mmio_kernel_region(paddr.page_base(), mapping_length, {}, access)); table.offset = paddr.offset_in_page(); table.paddr = paddr; table.length = length;