Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 17 additions & 0 deletions Cargo.lock
Original file line number Diff line number Diff line change
Expand Up @@ -3089,6 +3089,8 @@ dependencies = [
"pal_async",
"pci_bus",
"pci_core",
"pci_resources",
"pcie",
"range_map_vec",
"scsi_core",
"scsidisk",
Expand Down Expand Up @@ -5543,6 +5545,20 @@ dependencies = [
"vmcore",
]

[[package]]
name = "pcie"
version = "0.0.0"
dependencies = [
"chipset_device",
"inspect",
"pci_bus",
"pci_core",
"tracelimit",
"tracing",
"vmcore",
"zerocopy 0.8.25",
]

[[package]]
name = "pem-rfc7468"
version = "0.7.0"
Expand Down Expand Up @@ -9356,6 +9372,7 @@ dependencies = [
"parking_lot",
"paste",
"pci_bus",
"pcie",
"range_map_vec",
"state_unit",
"thiserror 2.0.16",
Expand Down
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -254,6 +254,7 @@ vmswitch = { path = "vm/devices/net/vmswitch" }
pci_bus = { path = "vm/devices/pci/pci_bus" }
pci_core = { path = "vm/devices/pci/pci_core" }
pci_resources = { path = "vm/devices/pci/pci_resources" }
pcie = { path = "vm/devices/pci/pcie" }
vpci = { path = "vm/devices/pci/vpci" }
vpci_client = { path = "vm/devices/pci/vpci_client" }
vpci_protocol = { path = "vm/devices/pci/vpci_protocol" }
Expand Down
2 changes: 2 additions & 0 deletions openvmm/hvlite_core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,8 @@ input_core.workspace = true
missing_dev.workspace = true
pci_bus.workspace = true
pci_core.workspace = true
pci_resources.workspace = true
pcie.workspace = true
scsi_core.workspace = true
scsidisk.workspace = true
serial_16550_resources.workspace = true
Expand Down
13 changes: 13 additions & 0 deletions openvmm/hvlite_core/src/partition.rs
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,12 @@ pub trait HvlitePartition: Inspect + Send + Sync + RequestYield + Synic {
minimum_vtl: Vtl,
) -> Option<Arc<dyn DoorbellRegistration>>;

/// Gets the [`MsiInterruptTarget`] interface for a particular VTL.
fn into_msi_target(
self: Arc<Self>,
minimum_vtl: Vtl,
) -> Option<Arc<dyn MsiInterruptTarget>>;

/// Returns whether virtual devices are supported.
fn supports_virtual_devices(&self) -> bool;

Expand Down Expand Up @@ -204,6 +210,13 @@ where
self.doorbell_registration(minimum_vtl)
}

/// Trait-object entry point for MSI delivery: forwards to the partition's
/// inherent `msi_interrupt_target` for the requested minimum VTL. Returns
/// `None` when the underlying partition provides no MSI target for that VTL.
fn into_msi_target(
self: Arc<Self>,
minimum_vtl: Vtl,
) -> Option<Arc<dyn MsiInterruptTarget>> {
self.msi_interrupt_target(minimum_vtl)
}

fn supports_virtual_devices(&self) -> bool {
self.new_virtual_device().is_some()
}
Expand Down
117 changes: 114 additions & 3 deletions openvmm/hvlite_core/src/worker/dispatch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@ use hvlite_defs::config::Hypervisor;
use hvlite_defs::config::HypervisorConfig;
use hvlite_defs::config::LoadMode;
use hvlite_defs::config::MemoryConfig;
use hvlite_defs::config::PcieEndpointConfig;
use hvlite_defs::config::PcieRootComplexConfig;
use hvlite_defs::config::PmuGsivConfig;
use hvlite_defs::config::ProcessorTopologyConfig;
use hvlite_defs::config::SerialPipes;
Expand Down Expand Up @@ -74,6 +76,8 @@ use pal_async::task::Spawn;
use pal_async::task::Task;
use pci_core::PciInterruptPin;
use pci_core::msi::MsiInterruptSet;
use pcie::root::GenericPcieRootComplex;
use pcie::root::GenericPcieRootPortDefinition;
use scsi_core::ResolveScsiDeviceHandleParams;
use scsidisk::SimpleScsiDisk;
use scsidisk::atapi_scsi::AtapiScsiDisk;
Expand Down Expand Up @@ -106,6 +110,7 @@ use vm_resource::kind::MouseInputHandleKind;
use vm_resource::kind::VirtioDeviceHandle;
use vm_resource::kind::VmbusDeviceHandleKind;
use vm_topology::memory::MemoryLayout;
use vm_topology::pcie::PcieHostBridge;
use vm_topology::processor::ArchTopology;
use vm_topology::processor::ProcessorTopology;
use vm_topology::processor::TopologyBuilder;
Expand Down Expand Up @@ -165,6 +170,8 @@ impl Manifest {
load_mode: config.load_mode,
floppy_disks: config.floppy_disks,
ide_disks: config.ide_disks,
pcie_root_complexes: config.pcie_root_complexes,
pcie_endpoints: config.pcie_endpoints,
vpci_devices: config.vpci_devices,
hypervisor: config.hypervisor,
memory: config.memory,
Expand Down Expand Up @@ -206,6 +213,8 @@ pub struct Manifest {
load_mode: LoadMode,
floppy_disks: Vec<FloppyDiskConfig>,
ide_disks: Vec<IdeDeviceConfig>,
pcie_root_complexes: Vec<PcieRootComplexConfig>,
pcie_endpoints: Vec<PcieEndpointConfig>,
vpci_devices: Vec<VpciDeviceConfig>,
memory: MemoryConfig,
processor_topology: ProcessorTopologyConfig,
Expand Down Expand Up @@ -571,6 +580,7 @@ struct LoadedVmInner {
client_notify_send: mesh::Sender<HaltReason>,
/// allow the guest to reset without notifying the client
automatic_guest_reset: bool,
pcie_host_bridges: Vec<PcieHostBridge>,
}

fn choose_hypervisor() -> anyhow::Result<Hypervisor> {
Expand Down Expand Up @@ -1196,6 +1206,7 @@ impl InitializedVm {
processor_topology: &processor_topology,
mem_layout: &mem_layout,
cache_topology: None,
pcie_host_bridges: &Vec::new(),
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have learned that "host bridges" is probably not the right term for this. What I intend to convey is the idea of the CPU's view of the PCIe root and what is exposed about the root through ACPI, but "host bridge" means different things on different platforms (ex. on Intel it means a PCIe function at address 0.0 on the internal bus of the root complex).

with_ioapic: cfg.chipset.with_generic_ioapic,
with_pic: cfg.chipset.with_generic_pic,
with_pit: cfg.chipset.with_generic_pit,
Expand Down Expand Up @@ -1727,6 +1738,99 @@ impl InitializedVm {
let mut vtl2_hvsock_relay = None;
let mut vmbus_redirect = false;

// PCI Express topology

let mut pcie_host_bridges = Vec::new();
{
// ECAM allocation starts at the configured base and grows upwards.
// Low MMIO allocation for PCIe starts just below the low MMIO window for other
// devices and grows downwards.
// High MMIO allocation for PCIe starts just above the high MMIO window for
// other devices and grows upwards.
let mut ecam_address = cfg.memory.pcie_ecam_base;
let mut low_mmio_address = cfg.memory.mmio_gaps[0].start();
let mut high_mmio_address = cfg.memory.mmio_gaps[1].end();

for rc in cfg.pcie_root_complexes {
let bus_count = (rc.end_bus as u16) - (rc.start_bus as u16) + 1;
let ecam_size = (bus_count as u64) * 256 * 4096;
let low_mmio_size = rc.low_mmio_size as u64;

let host_bridge = PcieHostBridge {
index: rc.index,
segment: rc.segment,
start_bus: rc.start_bus,
end_bus: rc.end_bus,
ecam_range: MemoryRange::new(ecam_address..ecam_address + ecam_size),
low_mmio: MemoryRange::new(low_mmio_address - low_mmio_size..low_mmio_address),
high_mmio: MemoryRange::new(
high_mmio_address..high_mmio_address + rc.high_mmio_size,
),
};

let device_name = format!("pcie-rc{}:{}", host_bridge.index, rc.name);
let root_complex =
chipset_builder
.arc_mutex_device(device_name)
.add(|services| {
let root_port_definitions = rc
.ports
.into_iter()
.map(|rp_cfg| GenericPcieRootPortDefinition {
name: rp_cfg.name.into(),
})
.collect();

GenericPcieRootComplex::new(
&mut services.register_mmio(),
host_bridge.start_bus,
host_bridge.end_bus,
host_bridge.ecam_range.start(),
root_port_definitions,
)
})?;

let bus_id = vmotherboard::BusId::new(&rc.name);
chipset_builder.register_weak_mutex_pcie_enumerator(bus_id, Box::new(root_complex));
pcie_host_bridges.push(host_bridge);

ecam_address += ecam_size;
low_mmio_address -= low_mmio_size;
high_mmio_address += rc.high_mmio_size;
}
}

for dev_cfg in cfg.pcie_endpoints {
let dev_name = format!("pcie:{}-{}", dev_cfg.port_name, dev_cfg.resource.id());
let mut msi_set = MsiInterruptSet::new();
chipset_builder
.arc_mutex_device(dev_name)
.on_pcie_port(vmotherboard::BusId::new(&dev_cfg.port_name))
.try_add_async(async |services| {
resolver
.resolve(
dev_cfg.resource,
pci_resources::ResolvePciDeviceHandleParams {
register_msi: &mut msi_set,
register_mmio: &mut services.register_mmio(),
driver_source: &driver_source,
guest_memory: &gm,
doorbell_registration: partition
.clone()
.into_doorbell_registration(Vtl::Vtl0),
shared_mem_mapper: Some(&mapper),
},
)
.await
.map(|r| r.0)
})
.await?;

if let Some(target) = partition.clone().into_msi_target(Vtl::Vtl0) {
msi_set.connect(target.as_ref());
}
}

if let Some(vmbus_cfg) = cfg.vmbus {
if !cfg.hypervisor.with_hv {
anyhow::bail!("vmbus required hypervisor enlightements");
Expand Down Expand Up @@ -2354,6 +2458,7 @@ impl InitializedVm {
halt_recv,
client_notify_send,
automatic_guest_reset: cfg.automatic_guest_reset,
pcie_host_bridges,
},
};

Expand Down Expand Up @@ -2383,6 +2488,7 @@ impl LoadedVmInner {
processor_topology: &self.processor_topology,
mem_layout: &self.mem_layout,
cache_topology: cache_topology.as_ref(),
pcie_host_bridges: &self.pcie_host_bridges,
with_ioapic: self.chipset_cfg.with_generic_ioapic,
with_psp: self.chipset_cfg.with_generic_psp,
with_pic: self.chipset_cfg.with_generic_pic,
Expand Down Expand Up @@ -2478,6 +2584,7 @@ impl LoadedVmInner {
} => {
let madt = acpi_builder.build_madt();
let srat = acpi_builder.build_srat();
let mcfg = (!self.pcie_host_bridges.is_empty()).then(|| acpi_builder.build_mcfg());
let pptt = cache_topology.is_some().then(|| acpi_builder.build_pptt());
let load_settings = super::vm_loaders::uefi::UefiLoadSettings {
debugging: enable_debugging,
Expand All @@ -2496,9 +2603,11 @@ impl LoadedVmInner {
&self.gm,
&self.processor_topology,
&self.mem_layout,
&self.pcie_host_bridges,
load_settings,
&madt,
&srat,
mcfg.as_deref(),
pptt.as_deref(),
)?;

Expand Down Expand Up @@ -2941,9 +3050,11 @@ impl LoadedVm {

let manifest = Manifest {
load_mode: self.inner.load_mode,
floppy_disks: vec![], // TODO
ide_disks: vec![], // TODO
vpci_devices: vec![], // TODO
floppy_disks: vec![], // TODO
ide_disks: vec![], // TODO
pcie_root_complexes: vec![], // TODO
pcie_endpoints: vec![], // TODO
vpci_devices: vec![], // TODO
memory: self.inner.memory_cfg,
processor_topology: self.inner.processor_topology.to_config(),
chipset: self.inner.chipset_cfg,
Expand Down
22 changes: 22 additions & 0 deletions openvmm/hvlite_core/src/worker/vm_loaders/uefi.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ use std::io::Seek;
use thiserror::Error;
use vm_loader::Loader;
use vm_topology::memory::MemoryLayout;
use vm_topology::pcie::PcieHostBridge;
use vm_topology::processor::ProcessorTopology;
use zerocopy::IntoBytes;

Expand Down Expand Up @@ -47,9 +48,11 @@ pub fn load_uefi(
gm: &GuestMemory,
processor_topology: &ProcessorTopology,
mem_layout: &MemoryLayout,
pcie_host_bridges: &Vec<PcieHostBridge>,
load_settings: UefiLoadSettings,
madt: &[u8],
srat: &[u8],
mcfg: Option<&[u8]>,
pptt: Option<&[u8]>,
) -> Result<Vec<Register>, Error> {
if mem_layout.mmio().len() < 2 {
Expand Down Expand Up @@ -155,10 +158,29 @@ pub fn load_uefi(
});
}

if let Some(mcfg) = mcfg {
cfg.add_raw(config::BlobStructureType::Mcfg, mcfg);
}

if let Some(pptt) = pptt {
cfg.add_raw(config::BlobStructureType::Pptt, pptt);
}

if !pcie_host_bridges.is_empty() {
let mut ssdt = acpi::ssdt::Ssdt::new();
for bridge in pcie_host_bridges {
ssdt.add_pcie(
bridge.index,
bridge.segment,
bridge.start_bus,
bridge.end_bus,
bridge.low_mmio,
bridge.high_mmio,
);
}
cfg.add_raw(config::BlobStructureType::Ssdt, &ssdt.to_bytes());
}

let mut loader = Loader::new(gm.clone(), mem_layout, hvdef::Vtl::Vtl0);

loader::uefi::load(
Expand Down
28 changes: 28 additions & 0 deletions openvmm/hvlite_defs/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,8 @@ pub struct Config {
pub load_mode: LoadMode,
pub floppy_disks: Vec<floppy_resources::FloppyDiskConfig>,
pub ide_disks: Vec<ide_resources::IdeDeviceConfig>,
pub pcie_root_complexes: Vec<PcieRootComplexConfig>,
pub pcie_endpoints: Vec<PcieEndpointConfig>,
pub vpci_devices: Vec<VpciDeviceConfig>,
pub memory: MemoryConfig,
pub processor_topology: ProcessorTopologyConfig,
Expand Down Expand Up @@ -96,6 +98,8 @@ pub const DEFAULT_GIC_REDISTRIBUTORS_BASE: u64 = if cfg!(target_os = "linux") {
0xEFFE_E000
};

pub const DEFAULT_PCIE_ECAM_BASE: u64 = 0x8_0000_0000; // 32GB, size depends on configuration

#[derive(MeshPayload, Debug)]
pub enum LoadMode {
Linux {
Expand Down Expand Up @@ -164,6 +168,29 @@ pub enum Vtl2BaseAddressType {
Vtl2Allocate { size: Option<u64> },
}

/// Configuration for a generic PCI Express root complex exposed to the guest.
#[derive(Debug, MeshPayload)]
pub struct PcieRootComplexConfig {
/// Stable index for this root complex; used to form the device name
/// (`pcie-rc{index}:{name}`) and passed through to the ACPI SSDT builder.
pub index: u32,
/// Human-readable name; also used as the vmotherboard bus id that
/// endpoints and the PCIe enumerator are registered against.
pub name: String,
/// PCI segment (domain) number for this root complex.
pub segment: u16,
/// First bus number decoded by this root complex.
pub start_bus: u8,
/// Last bus number decoded by this root complex (inclusive; the ECAM
/// window is sized as `(end_bus - start_bus + 1) * 256 * 4096` bytes).
pub end_bus: u8,
/// Size in bytes of the low MMIO window to allocate (carved downward
/// from the start of the first configured MMIO gap).
pub low_mmio_size: u32,
/// Size in bytes of the high MMIO window to allocate (carved upward
/// from the end of the second configured MMIO gap).
pub high_mmio_size: u64,
/// Root ports to instantiate under this root complex.
pub ports: Vec<PcieRootPortConfig>,
}

/// Configuration for a single root port under a PCIe root complex.
#[derive(Debug, MeshPayload)]
pub struct PcieRootPortConfig {
/// Port name; endpoints attach to this port by referencing the same
/// string in `PcieEndpointConfig::port_name`.
pub name: String,
}

/// Configuration for a PCI Express endpoint device attached to a root port.
#[derive(Debug, MeshPayload)]
pub struct PcieEndpointConfig {
/// Name of the root port this endpoint sits on (must match a
/// `PcieRootPortConfig::name` on some configured root complex).
pub port_name: String,
/// Resource handle resolved at VM build time into the concrete PCI
/// device implementation (MSIs and MMIO are wired up during resolve).
pub resource: Resource<PciDeviceHandleKind>,
}

#[derive(Debug, MeshPayload)]
pub struct VpciDeviceConfig {
pub vtl: DeviceVtl,
Expand Down Expand Up @@ -234,6 +261,7 @@ pub struct MemoryConfig {
pub mem_size: u64,
pub mmio_gaps: Vec<MemoryRange>,
pub prefetch_memory: bool,
pub pcie_ecam_base: u64,
}

#[derive(Debug, MeshPayload, Default)]
Expand Down
Loading