31 changes: 25 additions & 6 deletions hw/i386/intel_iommu.c
Expand Up @@ -1530,13 +1530,17 @@ static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as,
return vtd_page_walk(s, ce, addr, addr + size, &info, vtd_as->pasid);
}

static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as)
static int vtd_address_space_sync(VTDAddressSpace *vtd_as)
{
int ret;
VTDContextEntry ce;
IOMMUNotifier *n;

if (!(vtd_as->iommu.iommu_notify_flags & IOMMU_NOTIFIER_IOTLB_EVENTS)) {
/* If no MAP notifier is registered, we simply invalidate the whole cache */
if (!vtd_as_has_map_notifier(vtd_as)) {
IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
memory_region_unmap_iommu_notifier_range(n);
}
return 0;
}

Expand Down Expand Up @@ -2000,7 +2004,7 @@ static void vtd_iommu_replay_all(IntelIOMMUState *s)
VTDAddressSpace *vtd_as;

QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
vtd_sync_shadow_page_table(vtd_as);
vtd_address_space_sync(vtd_as);
}
}

Expand Down Expand Up @@ -2082,7 +2086,7 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s,
* framework will skip MAP notifications if that
* happened.
*/
vtd_sync_shadow_page_table(vtd_as);
vtd_address_space_sync(vtd_as);
}
}
}
Expand Down Expand Up @@ -2140,7 +2144,7 @@ static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
vtd_as->devfn, &ce) &&
domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) {
vtd_sync_shadow_page_table(vtd_as);
vtd_address_space_sync(vtd_as);
}
}
}
Expand Down Expand Up @@ -3179,13 +3183,28 @@ static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
{
VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
IntelIOMMUState *s = vtd_as->iommu_state;
X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

/* TODO: add support for VFIO and vhost users */
if (s->snoop_control) {
error_setg_errno(errp, ENOTSUP,
"Snoop Control with vhost or VFIO is not supported");
return -ENOTSUP;
}
if (!s->caching_mode && (new & IOMMU_NOTIFIER_MAP)) {
error_setg_errno(errp, ENOTSUP,
"device %02x.%02x.%x requires caching mode",
pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn),
PCI_FUNC(vtd_as->devfn));
return -ENOTSUP;
}
if (!x86_iommu->dt_supported && (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP)) {
error_setg_errno(errp, ENOTSUP,
"device %02x.%02x.%x requires device IOTLB mode",
pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn),
PCI_FUNC(vtd_as->devfn));
return -ENOTSUP;
}

/* Update per-address-space notifier flags */
vtd_as->notifier_flags = new;
Expand Down Expand Up @@ -3831,7 +3850,7 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
.domain_id = vtd_get_domain_id(s, &ce, vtd_as->pasid),
};

vtd_page_walk(s, &ce, 0, ~0ULL, &info, vtd_as->pasid);
vtd_page_walk(s, &ce, n->start, n->end, &info, vtd_as->pasid);
}
} else {
trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
17 changes: 6 additions & 11 deletions hw/i386/microvm.c
Expand Up @@ -328,7 +328,7 @@ static void microvm_memory_init(MicrovmMachineState *mms)
rom_set_fw(fw_cfg);

if (machine->kernel_filename != NULL) {
x86_load_linux(x86ms, fw_cfg, 0, true, false);
x86_load_linux(x86ms, fw_cfg, 0, true);
}

if (mms->option_roms) {
Expand Down Expand Up @@ -376,8 +376,7 @@ static void microvm_fix_kernel_cmdline(MachineState *machine)
MicrovmMachineState *mms = MICROVM_MACHINE(machine);
BusState *bus;
BusChild *kid;
char *cmdline, *existing_cmdline;
size_t len;
char *cmdline;

/*
* Find MMIO transports with attached devices, and add them to the kernel
Expand All @@ -386,8 +385,7 @@ static void microvm_fix_kernel_cmdline(MachineState *machine)
* Yes, this is a hack, but one that heavily improves the UX without
* introducing any significant issues.
*/
existing_cmdline = fw_cfg_read_bytes_ptr(x86ms->fw_cfg, FW_CFG_CMDLINE_DATA);
cmdline = g_strdup(existing_cmdline);
cmdline = g_strdup(machine->kernel_cmdline);
bus = sysbus_get_default();
QTAILQ_FOREACH(kid, &bus->children, sibling) {
DeviceState *dev = kid->child;
Expand All @@ -411,12 +409,9 @@ static void microvm_fix_kernel_cmdline(MachineState *machine)
}
}

len = strlen(cmdline);
if (len > VIRTIO_CMDLINE_TOTAL_MAX_LEN + strlen(existing_cmdline)) {
fprintf(stderr, "qemu: virtio mmio cmdline too large, skipping\n");
} else {
memcpy(existing_cmdline, cmdline, len + 1);
}
fw_cfg_modify_i32(x86ms->fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(cmdline) + 1);
fw_cfg_modify_string(x86ms->fw_cfg, FW_CFG_CMDLINE_DATA, cmdline);

g_free(cmdline);
}

4 changes: 2 additions & 2 deletions hw/i386/pc.c
Expand Up @@ -810,7 +810,7 @@ void xen_load_linux(PCMachineState *pcms)
rom_set_fw(fw_cfg);

x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size,
pcmc->pvh_enabled, pcmc->legacy_no_rng_seed);
pcmc->pvh_enabled);
for (i = 0; i < nb_option_roms; i++) {
assert(!strcmp(option_rom[i].name, "linuxboot.bin") ||
!strcmp(option_rom[i].name, "linuxboot_dma.bin") ||
Expand Down Expand Up @@ -1130,7 +1130,7 @@ void pc_memory_init(PCMachineState *pcms,

if (linux_boot) {
x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size,
pcmc->pvh_enabled, pcmc->legacy_no_rng_seed);
pcmc->pvh_enabled);
}

for (i = 0; i < nb_option_roms; i++) {
2 changes: 0 additions & 2 deletions hw/i386/pc_piix.c
Expand Up @@ -477,9 +477,7 @@ DEFINE_I440FX_MACHINE(v7_2, "pc-i440fx-7.2", NULL,

static void pc_i440fx_7_1_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_7_2_machine_options(m);
pcmc->legacy_no_rng_seed = true;
compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len);
compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len);
}
2 changes: 0 additions & 2 deletions hw/i386/pc_q35.c
Expand Up @@ -395,9 +395,7 @@ DEFINE_Q35_MACHINE(v7_2, "pc-q35-7.2", NULL,

static void pc_q35_7_1_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_7_2_machine_options(m);
pcmc->legacy_no_rng_seed = true;
compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len);
compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len);
}
122 changes: 32 additions & 90 deletions hw/i386/x86.c
Expand Up @@ -26,7 +26,6 @@
#include "qemu/cutils.h"
#include "qemu/units.h"
#include "qemu/datadir.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-common.h"
#include "qapi/clone-visitor.h"
Expand All @@ -36,7 +35,6 @@
#include "sysemu/whpx.h"
#include "sysemu/numa.h"
#include "sysemu/replay.h"
#include "sysemu/reset.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/xen.h"
Expand All @@ -49,7 +47,6 @@
#include "hw/intc/i8259.h"
#include "hw/rtc/mc146818rtc.h"
#include "target/i386/sev.h"
#include "hw/i386/microvm.h"

#include "hw/acpi/cpu_hotplug.h"
#include "hw/irq.h"
Expand Down Expand Up @@ -675,12 +672,12 @@ DeviceState *ioapic_init_secondary(GSIState *gsi_state)
return dev;
}

typedef struct SetupData {
struct setup_data {
uint64_t next;
uint32_t type;
uint32_t len;
uint8_t data[];
} __attribute__((packed)) SetupData;
} __attribute__((packed));


/*
Expand Down Expand Up @@ -787,62 +784,30 @@ static bool load_elfboot(const char *kernel_filename,
return true;
}

typedef struct SetupDataFixup {
void *pos;
hwaddr orig_val, new_val;
uint32_t addr;
} SetupDataFixup;

static void fixup_setup_data(void *opaque)
{
SetupDataFixup *fixup = opaque;
stq_p(fixup->pos, fixup->new_val);
}

static void reset_setup_data(void *opaque)
{
SetupDataFixup *fixup = opaque;
stq_p(fixup->pos, fixup->orig_val);
}

static void reset_rng_seed(void *opaque)
{
SetupData *setup_data = opaque;
qemu_guest_getrandom_nofail(setup_data->data, le32_to_cpu(setup_data->len));
}

void x86_load_linux(X86MachineState *x86ms,
FWCfgState *fw_cfg,
int acpi_data_size,
bool pvh_enabled,
bool legacy_no_rng_seed)
bool pvh_enabled)
{
bool linuxboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled;
uint16_t protocol;
int setup_size, kernel_size, cmdline_size;
int dtb_size, setup_data_offset;
uint32_t initrd_max;
uint8_t header[8192], *setup, *kernel;
hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0, first_setup_data = 0;
hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0;
FILE *f;
char *vmode;
MachineState *machine = MACHINE(x86ms);
SetupData *setup_data;
struct setup_data *setup_data;
const char *kernel_filename = machine->kernel_filename;
const char *initrd_filename = machine->initrd_filename;
const char *dtb_filename = machine->dtb;
char *kernel_cmdline;
const char *kernel_cmdline = machine->kernel_cmdline;
SevKernelLoaderContext sev_load_ctx = {};
enum { RNG_SEED_LENGTH = 32 };

/*
* Add the NUL terminator, some padding for the microvm cmdline fiddling
* hack, and then align to 16 bytes as a paranoia measure
*/
cmdline_size = (strlen(machine->kernel_cmdline) + 1 +
VIRTIO_CMDLINE_TOTAL_MAX_LEN + 16) & ~15;
/* Make a copy, since we might append arbitrary bytes to it later. */
kernel_cmdline = g_strndup(machine->kernel_cmdline, cmdline_size);
/* Align to 16 bytes as a paranoia measure */
cmdline_size = (strlen(kernel_cmdline) + 16) & ~15;

/* load the kernel header */
f = fopen(kernel_filename, "rb");
Expand Down Expand Up @@ -983,6 +948,12 @@ void x86_load_linux(X86MachineState *x86ms,
initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1;
}

fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr);
fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline) + 1);
fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline);
sev_load_ctx.cmdline_data = (char *)kernel_cmdline;
sev_load_ctx.cmdline_size = strlen(kernel_cmdline) + 1;

if (protocol >= 0x202) {
stl_p(header + 0x228, cmdline_addr);
} else {
Expand Down Expand Up @@ -1109,67 +1080,38 @@ void x86_load_linux(X86MachineState *x86ms,
exit(1);
}

setup_data_offset = cmdline_size;
cmdline_size += sizeof(SetupData) + dtb_size;
kernel_cmdline = g_realloc(kernel_cmdline, cmdline_size);
setup_data = (void *)kernel_cmdline + setup_data_offset;
setup_data->next = cpu_to_le64(first_setup_data);
first_setup_data = cmdline_addr + setup_data_offset;
setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16);
kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size;
kernel = g_realloc(kernel, kernel_size);

stq_p(header + 0x250, prot_addr + setup_data_offset);

setup_data = (struct setup_data *)(kernel + setup_data_offset);
setup_data->next = 0;
setup_data->type = cpu_to_le32(SETUP_DTB);
setup_data->len = cpu_to_le32(dtb_size);
load_image_size(dtb_filename, setup_data->data, dtb_size);
}

if (!legacy_no_rng_seed && protocol >= 0x209) {
setup_data_offset = cmdline_size;
cmdline_size += sizeof(SetupData) + RNG_SEED_LENGTH;
kernel_cmdline = g_realloc(kernel_cmdline, cmdline_size);
setup_data = (void *)kernel_cmdline + setup_data_offset;
setup_data->next = cpu_to_le64(first_setup_data);
first_setup_data = cmdline_addr + setup_data_offset;
setup_data->type = cpu_to_le32(SETUP_RNG_SEED);
setup_data->len = cpu_to_le32(RNG_SEED_LENGTH);
qemu_guest_getrandom_nofail(setup_data->data, RNG_SEED_LENGTH);
qemu_register_reset_nosnapshotload(reset_rng_seed, setup_data);
fw_cfg_add_bytes_callback(fw_cfg, FW_CFG_KERNEL_DATA, reset_rng_seed, NULL,
setup_data, kernel, kernel_size, true);
} else {
fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size);
load_image_size(dtb_filename, setup_data->data, dtb_size);
}

fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr);
fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, cmdline_size);
fw_cfg_add_bytes(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline, cmdline_size);
sev_load_ctx.cmdline_data = (char *)kernel_cmdline;
sev_load_ctx.cmdline_size = cmdline_size;

fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr);
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
sev_load_ctx.kernel_data = (char *)kernel;
sev_load_ctx.kernel_size = kernel_size;

/*
* If we're starting an encrypted VM, it will be OVMF based, which uses the
* efi stub for booting and doesn't require any values to be placed in the
* kernel header. We therefore don't update the header so the hash of the
* kernel on the other side of the fw_cfg interface matches the hash of the
* file the user passed in.
*/
if (!sev_enabled() && first_setup_data) {
SetupDataFixup *fixup = g_malloc(sizeof(*fixup));

if (!sev_enabled()) {
memcpy(setup, header, MIN(sizeof(header), setup_size));
/* Offset 0x250 is a pointer to the first setup_data link. */
fixup->pos = setup + 0x250;
fixup->orig_val = ldq_p(fixup->pos);
fixup->new_val = first_setup_data;
fixup->addr = cpu_to_le32(real_addr);
fw_cfg_add_bytes_callback(fw_cfg, FW_CFG_SETUP_ADDR, fixup_setup_data, NULL,
fixup, &fixup->addr, sizeof(fixup->addr), true);
qemu_register_reset(reset_setup_data, fixup);
} else {
fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr);
}

fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr);
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size);
sev_load_ctx.kernel_data = (char *)kernel;
sev_load_ctx.kernel_size = kernel_size;

fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr);
fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size);
fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size);
sev_load_ctx.setup_data = (char *)setup;
15 changes: 11 additions & 4 deletions hw/mem/cxl_type3.c
Expand Up @@ -401,14 +401,13 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
MemoryRegion *mr = &regs->component_registers;
uint8_t *pci_conf = pci_dev->config;
unsigned short msix_num = 1;
int i;
int i, rc;

if (!cxl_setup_memory(ct3d, errp)) {
return;
}

pci_config_set_prog_interface(pci_conf, 0x10);
pci_config_set_class(pci_conf, PCI_CLASS_MEMORY_CXL);

pcie_endpoint_cap_init(pci_dev, 0x80);
if (ct3d->sn != UI64_NULL) {
Expand Down Expand Up @@ -438,7 +437,10 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
&ct3d->cxl_dstate.device_registers);

/* MSI(-X) Initialization */
msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
if (rc) {
goto err_address_space_free;
}
for (i = 0; i < msix_num; i++) {
msix_vector_use(pci_dev, i);
}
Expand All @@ -450,6 +452,11 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
cxl_cstate->cdat.private = ct3d;
cxl_doe_cdat_init(cxl_cstate, errp);
return;

err_address_space_free:
address_space_destroy(&ct3d->hostmem_as);
return;
}

static void ct3_exit(PCIDevice *pci_dev)
Expand Down Expand Up @@ -619,7 +626,7 @@ static void ct3_class_init(ObjectClass *oc, void *data)

pc->realize = ct3_realize;
pc->exit = ct3_exit;
pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
pc->class_id = PCI_CLASS_MEMORY_CXL;
pc->vendor_id = PCI_VENDOR_ID_INTEL;
pc->device_id = 0xd93; /* LVF for now */
pc->revision = 1;
15 changes: 15 additions & 0 deletions hw/net/virtio-net.c
Expand Up @@ -820,6 +820,21 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
features |= (1ULL << VIRTIO_NET_F_MTU);
}

/*
* Since GUEST_ANNOUNCE is emulated, the feature bit could be set without
* it being enabled by the backend. This happens in the vDPA case.
*
* Make sure the feature set is not incoherent, as the driver could refuse
* to start.
*
* TODO: QEMU is able to emulate a CVQ just for guest_announce purposes,
* helping the guest announce its new location to vDPA devices that do not
* support it.
*/
if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) {
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE);
}

return features;
}

21 changes: 6 additions & 15 deletions hw/nvram/fw_cfg.c
Expand Up @@ -693,12 +693,12 @@ static const VMStateDescription vmstate_fw_cfg = {
}
};

void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key,
FWCfgCallback select_cb,
FWCfgWriteCallback write_cb,
void *callback_opaque,
void *data, size_t len,
bool read_only)
static void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key,
FWCfgCallback select_cb,
FWCfgWriteCallback write_cb,
void *callback_opaque,
void *data, size_t len,
bool read_only)
{
int arch = !!(key & FW_CFG_ARCH_LOCAL);

Expand Down Expand Up @@ -741,15 +741,6 @@ void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len)
fw_cfg_add_bytes_callback(s, key, NULL, NULL, NULL, data, len, true);
}

void *fw_cfg_read_bytes_ptr(FWCfgState *s, uint16_t key)
{
int arch = !!(key & FW_CFG_ARCH_LOCAL);

key &= FW_CFG_ENTRY_MASK;
assert(key < fw_cfg_max_entry(s));
return s->entries[arch][key].data;
}

void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value)
{
size_t sz = strlen(value) + 1;
2 changes: 1 addition & 1 deletion hw/pci-bridge/cxl_downstream.c
Expand Up @@ -15,7 +15,7 @@
#include "hw/pci/pcie_port.h"
#include "qapi/error.h"

typedef struct CXLDownStreamPort {
typedef struct CXLDownstreamPort {
/*< private >*/
PCIESlot parent_obj;

8 changes: 8 additions & 0 deletions hw/pci/pci.c
Expand Up @@ -282,9 +282,13 @@ static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
{
PCIBus *bus;
for (;;) {
int dev_irq = irq_num;
bus = pci_get_bus(pci_dev);
assert(bus->map_irq);
irq_num = bus->map_irq(pci_dev, irq_num);
trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
pci_bus_is_root(bus) ? "root-complex"
: DEVICE(bus->parent_dev)->canonical_path);
if (bus->set_irq)
break;
pci_dev = bus->parent_dev;
Expand Down Expand Up @@ -1617,8 +1621,12 @@ PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
PCIBus *bus;

do {
int dev_irq = pin;
bus = pci_get_bus(dev);
pin = bus->map_irq(dev, pin);
trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
pci_bus_is_root(bus) ? "root-complex"
: DEVICE(bus->parent_dev)->canonical_path);
dev = bus->parent_dev;
} while (dev);

30 changes: 18 additions & 12 deletions hw/pci/pcie.c
Expand Up @@ -39,6 +39,11 @@
#define PCIE_DEV_PRINTF(dev, fmt, ...) \
PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__)

static bool pcie_sltctl_powered_off(uint16_t sltctl)
{
return (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_OFF
&& (sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF;
}

/***************************************************************************
* pci express capability helper functions
Expand Down Expand Up @@ -373,8 +378,8 @@ void pcie_cap_slot_enable_power(PCIDevice *dev)
uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP);

if (sltcap & PCI_EXP_SLTCAP_PCP) {
pci_set_word_by_mask(exp_cap + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PCC, PCI_EXP_SLTCTL_PWR_ON);
pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PCC);
}
}

Expand All @@ -395,6 +400,7 @@ static void pcie_cap_update_power(PCIDevice *hotplug_dev)

if (sltcap & PCI_EXP_SLTCAP_PCP) {
power = (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_ON;
/* Don't we also need to check (sltctl & PCI_EXP_SLTCTL_PIC)? */
}

pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
Expand Down Expand Up @@ -579,8 +585,7 @@ void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev,
return;
}

if (((sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF) &&
((sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_OFF)) {
if (pcie_sltctl_powered_off(sltctl)) {
/* slot is powered off -> unplug without round-trip to the guest */
pcie_cap_slot_do_unplug(hotplug_pdev);
hotplug_event_notify(hotplug_pdev);
Expand Down Expand Up @@ -634,8 +639,8 @@ void pcie_cap_slot_init(PCIDevice *dev, PCIESlot *s)
PCI_EXP_SLTCTL_PIC |
PCI_EXP_SLTCTL_AIC);
pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PIC_OFF |
PCI_EXP_SLTCTL_AIC_OFF);
PCI_EXP_SLTCTL_PWR_IND_OFF |
PCI_EXP_SLTCTL_ATTN_IND_OFF);
pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_PIC |
PCI_EXP_SLTCTL_AIC |
Expand Down Expand Up @@ -679,7 +684,8 @@ void pcie_cap_slot_reset(PCIDevice *dev)
PCI_EXP_SLTCTL_PDCE |
PCI_EXP_SLTCTL_ABPE);
pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
PCI_EXP_SLTCTL_AIC_OFF);
PCI_EXP_SLTCTL_PWR_IND_OFF |
PCI_EXP_SLTCTL_ATTN_IND_OFF);

if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
/* Downstream ports enforce device number 0. */
Expand All @@ -694,7 +700,8 @@ void pcie_cap_slot_reset(PCIDevice *dev)
PCI_EXP_SLTCTL_PCC);
}

pic = populated ? PCI_EXP_SLTCTL_PIC_ON : PCI_EXP_SLTCTL_PIC_OFF;
pic = populated ?
PCI_EXP_SLTCTL_PWR_IND_ON : PCI_EXP_SLTCTL_PWR_IND_OFF;
pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, pic);
}

Expand Down Expand Up @@ -769,10 +776,9 @@ void pcie_cap_slot_write_config(PCIDevice *dev,
* this is a work around for guests that overwrite
* control of powered off slots before powering them on.
*/
if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) &&
(val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF &&
(!(old_slt_ctl & PCI_EXP_SLTCTL_PCC) ||
(old_slt_ctl & PCI_EXP_SLTCTL_PIC_OFF) != PCI_EXP_SLTCTL_PIC_OFF)) {
if ((sltsta & PCI_EXP_SLTSTA_PDS) && pcie_sltctl_powered_off(val) &&
!pcie_sltctl_powered_off(old_slt_ctl))
{
pcie_cap_slot_do_unplug(dev);
}
pcie_cap_update_power(dev);
107 changes: 57 additions & 50 deletions hw/pci/shpc.c
Expand Up @@ -123,10 +123,13 @@
#define SHPC_PCI_TO_IDX(pci_slot) ((pci_slot) - 1)
#define SHPC_IDX_TO_PHYSICAL(slot) ((slot) + 1)

static uint16_t shpc_get_status(SHPCDevice *shpc, int slot, uint16_t msk)
static uint8_t shpc_get_status(SHPCDevice *shpc, int slot, uint16_t msk)
{
uint8_t *status = shpc->config + SHPC_SLOT_STATUS(slot);
return (pci_get_word(status) & msk) >> ctz32(msk);
uint16_t result = (pci_get_word(status) & msk) >> ctz32(msk);

assert(result <= UINT8_MAX);
return result;
}

static void shpc_set_status(SHPCDevice *shpc,
Expand Down Expand Up @@ -223,6 +226,7 @@ void shpc_reset(PCIDevice *d)
SHPC_SLOT_STATUS_PRSNT_MASK);
shpc_set_status(shpc, i, SHPC_LED_OFF, SHPC_SLOT_PWR_LED_MASK);
}
shpc_set_status(shpc, i, SHPC_LED_OFF, SHPC_SLOT_ATTN_LED_MASK);
shpc_set_status(shpc, i, 0, SHPC_SLOT_STATUS_66);
}
shpc_set_sec_bus_speed(shpc, SHPC_SEC_BUS_33);
Expand Down Expand Up @@ -254,60 +258,66 @@ static void shpc_free_devices_in_slot(SHPCDevice *shpc, int slot)
}
}

static void shpc_slot_command(SHPCDevice *shpc, uint8_t target,
static bool shpc_slot_is_off(uint8_t state, uint8_t power, uint8_t attn)
{
return state == SHPC_STATE_DISABLED && power == SHPC_LED_OFF;
}

static void shpc_slot_command(PCIDevice *d, uint8_t target,
uint8_t state, uint8_t power, uint8_t attn)
{
uint8_t current_state;
SHPCDevice *shpc = d->shpc;
int slot = SHPC_LOGICAL_TO_IDX(target);
uint8_t old_state = shpc_get_status(shpc, slot, SHPC_SLOT_STATE_MASK);
uint8_t old_power = shpc_get_status(shpc, slot, SHPC_SLOT_PWR_LED_MASK);
uint8_t old_attn = shpc_get_status(shpc, slot, SHPC_SLOT_ATTN_LED_MASK);

if (target < SHPC_CMD_TRGT_MIN || slot >= shpc->nslots) {
shpc_invalid_command(shpc);
return;
}
current_state = shpc_get_status(shpc, slot, SHPC_SLOT_STATE_MASK);
if (current_state == SHPC_STATE_ENABLED && state == SHPC_STATE_PWRONLY) {

if (old_state == SHPC_STATE_ENABLED && state == SHPC_STATE_PWRONLY) {
shpc_invalid_command(shpc);
return;
}

switch (power) {
case SHPC_LED_NO:
break;
default:
if (power == SHPC_LED_NO) {
power = old_power;
} else {
/* TODO: send event to monitor */
shpc_set_status(shpc, slot, power, SHPC_SLOT_PWR_LED_MASK);
}
switch (attn) {
case SHPC_LED_NO:
break;
default:

if (attn == SHPC_LED_NO) {
attn = old_attn;
} else {
/* TODO: send event to monitor */
shpc_set_status(shpc, slot, attn, SHPC_SLOT_ATTN_LED_MASK);
}

if ((current_state == SHPC_STATE_DISABLED && state == SHPC_STATE_PWRONLY) ||
(current_state == SHPC_STATE_DISABLED && state == SHPC_STATE_ENABLED)) {
shpc_set_status(shpc, slot, state, SHPC_SLOT_STATE_MASK);
} else if ((current_state == SHPC_STATE_ENABLED ||
current_state == SHPC_STATE_PWRONLY) &&
state == SHPC_STATE_DISABLED) {
if (state == SHPC_STATE_NO) {
state = old_state;
} else {
shpc_set_status(shpc, slot, state, SHPC_SLOT_STATE_MASK);
power = shpc_get_status(shpc, slot, SHPC_SLOT_PWR_LED_MASK);
/* TODO: track what monitor requested. */
/* Look at LED to figure out whether it's ok to remove the device. */
if (power == SHPC_LED_OFF) {
shpc_free_devices_in_slot(shpc, slot);
shpc_set_status(shpc, slot, 1, SHPC_SLOT_STATUS_MRL_OPEN);
shpc_set_status(shpc, slot, SHPC_SLOT_STATUS_PRSNT_EMPTY,
SHPC_SLOT_STATUS_PRSNT_MASK);
shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |=
SHPC_SLOT_EVENT_MRL |
SHPC_SLOT_EVENT_PRESENCE;
}
}

if (!shpc_slot_is_off(old_state, old_power, old_attn) &&
shpc_slot_is_off(state, power, attn))
{
shpc_free_devices_in_slot(shpc, slot);
shpc_set_status(shpc, slot, 1, SHPC_SLOT_STATUS_MRL_OPEN);
shpc_set_status(shpc, slot, SHPC_SLOT_STATUS_PRSNT_EMPTY,
SHPC_SLOT_STATUS_PRSNT_MASK);
shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |=
SHPC_SLOT_EVENT_MRL |
SHPC_SLOT_EVENT_PRESENCE;
}
}

static void shpc_command(SHPCDevice *shpc)
static void shpc_command(PCIDevice *d)
{
SHPCDevice *shpc = d->shpc;
uint8_t code = pci_get_byte(shpc->config + SHPC_CMD_CODE);
uint8_t speed;
uint8_t target;
Expand All @@ -328,7 +338,7 @@ static void shpc_command(SHPCDevice *shpc)
state = (code & SHPC_SLOT_STATE_MASK) >> SHPC_SLOT_STATE_SHIFT;
power = (code & SHPC_SLOT_PWR_LED_MASK) >> SHPC_SLOT_PWR_LED_SHIFT;
attn = (code & SHPC_SLOT_ATTN_LED_MASK) >> SHPC_SLOT_ATTN_LED_SHIFT;
shpc_slot_command(shpc, target, state, power, attn);
shpc_slot_command(d, target, state, power, attn);
break;
case 0x40 ... 0x47:
speed = code & SHPC_SEC_BUS_MASK;
Expand All @@ -346,10 +356,10 @@ static void shpc_command(SHPCDevice *shpc)
}
for (i = 0; i < shpc->nslots; ++i) {
if (!(shpc_get_status(shpc, i, SHPC_SLOT_STATUS_MRL_OPEN))) {
shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN,
shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN,
SHPC_STATE_PWRONLY, SHPC_LED_ON, SHPC_LED_NO);
} else {
shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN,
shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN,
SHPC_STATE_NO, SHPC_LED_OFF, SHPC_LED_NO);
}
}
Expand All @@ -367,10 +377,10 @@ static void shpc_command(SHPCDevice *shpc)
}
for (i = 0; i < shpc->nslots; ++i) {
if (!(shpc_get_status(shpc, i, SHPC_SLOT_STATUS_MRL_OPEN))) {
shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN,
shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN,
SHPC_STATE_ENABLED, SHPC_LED_ON, SHPC_LED_NO);
} else {
shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN,
shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN,
SHPC_STATE_NO, SHPC_LED_OFF, SHPC_LED_NO);
}
}
Expand Down Expand Up @@ -402,7 +412,7 @@ static void shpc_write(PCIDevice *d, unsigned addr, uint64_t val, int l)
shpc->config[a] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
}
if (ranges_overlap(addr, l, SHPC_CMD_CODE, 2)) {
shpc_command(shpc);
shpc_command(d);
}
shpc_interrupt_update(d);
}
Expand Down Expand Up @@ -486,8 +496,9 @@ static const MemoryRegionOps shpc_mmio_ops = {
.max_access_size = 4,
},
};
static void shpc_device_plug_common(PCIDevice *affected_dev, int *slot,
SHPCDevice *shpc, Error **errp)

static bool shpc_device_get_slot(PCIDevice *affected_dev, int *slot,
SHPCDevice *shpc, Error **errp)
{
int pci_slot = PCI_SLOT(affected_dev->devfn);
*slot = SHPC_PCI_TO_IDX(pci_slot);
Expand All @@ -497,21 +508,20 @@ static void shpc_device_plug_common(PCIDevice *affected_dev, int *slot,
"controller. Valid slots are between %d and %d.",
pci_slot, SHPC_IDX_TO_PCI(0),
SHPC_IDX_TO_PCI(shpc->nslots) - 1);
return;
return false;
}

return true;
}

void shpc_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp)
{
Error *local_err = NULL;
PCIDevice *pci_hotplug_dev = PCI_DEVICE(hotplug_dev);
SHPCDevice *shpc = pci_hotplug_dev->shpc;
int slot;

shpc_device_plug_common(PCI_DEVICE(dev), &slot, shpc, &local_err);
if (local_err) {
error_propagate(errp, local_err);
if (!shpc_device_get_slot(PCI_DEVICE(dev), &slot, shpc, errp)) {
return;
}

Expand Down Expand Up @@ -553,16 +563,13 @@ void shpc_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
void shpc_device_unplug_request_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
Error *local_err = NULL;
PCIDevice *pci_hotplug_dev = PCI_DEVICE(hotplug_dev);
SHPCDevice *shpc = pci_hotplug_dev->shpc;
uint8_t state;
uint8_t led;
int slot;

shpc_device_plug_common(PCI_DEVICE(dev), &slot, shpc, &local_err);
if (local_err) {
error_propagate(errp, local_err);
if (!shpc_device_get_slot(PCI_DEVICE(dev), &slot, shpc, errp)) {
return;
}

1 change: 1 addition & 0 deletions hw/pci/trace-events
Expand Up @@ -3,6 +3,7 @@
# pci.c
pci_update_mappings_del(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
pci_update_mappings_add(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
pci_route_irq(int dev_irq, const char *dev_path, int parent_irq, const char *parent_path) "IRQ %d @%s -> IRQ %d @%s"

# pci_host.c
pci_cfg_read(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, unsigned offs, unsigned val) "%s %02x:%02x.%x @0x%x -> 0x%x"
8 changes: 5 additions & 3 deletions hw/smbios/smbios.c
Expand Up @@ -750,14 +750,16 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance)
t->core_count = (ms->smp.cores > 255) ? 0xFF : ms->smp.cores;
t->core_enabled = t->core_count;

t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores);

t->thread_count = (ms->smp.threads > 255) ? 0xFF : ms->smp.threads;
t->thread_count2 = cpu_to_le16(ms->smp.threads);

t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */
t->processor_family2 = cpu_to_le16(0x01); /* Other */

if (tbl_len == SMBIOS_TYPE_4_LEN_V30) {
t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores);
t->thread_count2 = cpu_to_le16(ms->smp.threads);
}

SMBIOS_BUILD_TABLE_POST;
smbios_type4_count++;
}
19 changes: 13 additions & 6 deletions hw/timer/hpet.c
Expand Up @@ -353,6 +353,16 @@ static const VMStateDescription vmstate_hpet = {
}
};

static void hpet_arm(HPETTimer *t, uint64_t ticks)
{
if (ticks < ns_to_ticks(INT64_MAX / 2)) {
timer_mod(t->qemu_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ticks_to_ns(ticks));
} else {
timer_del(t->qemu_timer);
}
}

/*
* timer expiration callback
*/
Expand All @@ -375,13 +385,11 @@ static void hpet_timer(void *opaque)
}
}
diff = hpet_calculate_diff(t, cur_tick);
timer_mod(t->qemu_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff));
hpet_arm(t, diff);
} else if (t->config & HPET_TN_32BIT && !timer_is_periodic(t)) {
if (t->wrap_flag) {
diff = hpet_calculate_diff(t, cur_tick);
timer_mod(t->qemu_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
(int64_t)ticks_to_ns(diff));
hpet_arm(t, diff);
t->wrap_flag = 0;
}
}
Expand All @@ -408,8 +416,7 @@ static void hpet_set_timer(HPETTimer *t)
t->wrap_flag = 1;
}
}
timer_mod(t->qemu_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff));
hpet_arm(t, diff);
}

static void hpet_del_timer(HPETTimer *t)
2 changes: 1 addition & 1 deletion hw/virtio/vhost-shadow-virtqueue.c
Expand Up @@ -522,7 +522,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
size_t vhost_svq_poll(VhostShadowVirtqueue *svq)
{
int64_t start_us = g_get_monotonic_time();
uint32_t len;
uint32_t len = 0;

do {
if (vhost_svq_more_used(svq)) {
10 changes: 6 additions & 4 deletions hw/virtio/vhost-user-gpio.c
Expand Up @@ -16,6 +16,7 @@
#include "trace.h"

#define REALIZE_CONNECTION_RETRIES 3
#define VHOST_NVQS 2

/* Features required from VirtIO */
static const int feature_bits[] = {
Expand Down Expand Up @@ -208,8 +209,7 @@ static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserGPIO *gpio)
{
virtio_delete_queue(gpio->command_vq);
virtio_delete_queue(gpio->interrupt_vq);
g_free(gpio->vhost_dev.vqs);
gpio->vhost_dev.vqs = NULL;
g_free(gpio->vhost_vqs);
virtio_cleanup(vdev);
vhost_user_cleanup(&gpio->vhost_user);
}
Expand All @@ -229,6 +229,9 @@ static int vu_gpio_connect(DeviceState *dev, Error **errp)
vhost_dev_set_config_notifier(vhost_dev, &gpio_ops);
gpio->vhost_user.supports_config = true;

gpio->vhost_dev.nvqs = VHOST_NVQS;
gpio->vhost_dev.vqs = gpio->vhost_vqs;

ret = vhost_dev_init(vhost_dev, &gpio->vhost_user,
VHOST_BACKEND_TYPE_USER, 0, errp);
if (ret < 0) {
Expand Down Expand Up @@ -347,10 +350,9 @@ static void vu_gpio_device_realize(DeviceState *dev, Error **errp)

virtio_init(vdev, VIRTIO_ID_GPIO, sizeof(gpio->config));

gpio->vhost_dev.nvqs = 2;
gpio->command_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output);
gpio->interrupt_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output);
gpio->vhost_dev.vqs = g_new0(struct vhost_virtqueue, gpio->vhost_dev.nvqs);
gpio->vhost_vqs = g_new0(struct vhost_virtqueue, VHOST_NVQS);

gpio->connected = false;

5 changes: 3 additions & 2 deletions hw/virtio/vhost-user-i2c.c
Expand Up @@ -143,8 +143,6 @@ static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserI2C *i2c)
vhost_user_cleanup(&i2c->vhost_user);
virtio_delete_queue(i2c->vq);
virtio_cleanup(vdev);
g_free(i2c->vhost_dev.vqs);
i2c->vhost_dev.vqs = NULL;
}

static int vu_i2c_connect(DeviceState *dev)
Expand Down Expand Up @@ -228,6 +226,7 @@ static void vu_i2c_device_realize(DeviceState *dev, Error **errp)
ret = vhost_dev_init(&i2c->vhost_dev, &i2c->vhost_user,
VHOST_BACKEND_TYPE_USER, 0, errp);
if (ret < 0) {
g_free(i2c->vhost_dev.vqs);
do_vhost_user_cleanup(vdev, i2c);
}

Expand All @@ -239,10 +238,12 @@ static void vu_i2c_device_unrealize(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserI2C *i2c = VHOST_USER_I2C(dev);
struct vhost_virtqueue *vhost_vqs = i2c->vhost_dev.vqs;

/* This will stop vhost backend if appropriate. */
vu_i2c_set_status(vdev, 0);
vhost_dev_cleanup(&i2c->vhost_dev);
g_free(vhost_vqs);
do_vhost_user_cleanup(vdev, i2c);
}

5 changes: 3 additions & 2 deletions hw/virtio/vhost-user-rng.c
Expand Up @@ -229,6 +229,7 @@ static void vu_rng_device_realize(DeviceState *dev, Error **errp)
return;

vhost_dev_init_failed:
g_free(rng->vhost_dev.vqs);
virtio_delete_queue(rng->req_vq);
virtio_add_queue_failed:
virtio_cleanup(vdev);
Expand All @@ -239,12 +240,12 @@ static void vu_rng_device_unrealize(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserRNG *rng = VHOST_USER_RNG(dev);
struct vhost_virtqueue *vhost_vqs = rng->vhost_dev.vqs;

vu_rng_set_status(vdev, 0);

vhost_dev_cleanup(&rng->vhost_dev);
g_free(rng->vhost_dev.vqs);
rng->vhost_dev.vqs = NULL;
g_free(vhost_vqs);
virtio_delete_queue(rng->req_vq);
virtio_cleanup(vdev);
vhost_user_cleanup(&rng->vhost_user);
30 changes: 15 additions & 15 deletions hw/virtio/vhost-user.c
Expand Up @@ -40,7 +40,7 @@

#define VHOST_MEMORY_BASELINE_NREGIONS 8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_USER_SLAVE_MAX_FDS 8
#define VHOST_USER_BACKEND_MAX_FDS 8

/*
* Set maximum number of RAM slots supported to
Expand Down Expand Up @@ -71,12 +71,12 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_RARP = 2,
VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
VHOST_USER_PROTOCOL_F_NET_MTU = 4,
VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5,
VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
VHOST_USER_PROTOCOL_F_CONFIG = 9,
VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10,
VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
Expand Down Expand Up @@ -110,7 +110,7 @@ typedef enum VhostUserRequest {
VHOST_USER_SET_VRING_ENABLE = 18,
VHOST_USER_SEND_RARP = 19,
VHOST_USER_NET_SET_MTU = 20,
VHOST_USER_SET_SLAVE_REQ_FD = 21,
VHOST_USER_SET_BACKEND_REQ_FD = 21,
VHOST_USER_IOTLB_MSG = 22,
VHOST_USER_SET_VRING_ENDIAN = 23,
VHOST_USER_GET_CONFIG = 24,
Expand All @@ -134,11 +134,11 @@ typedef enum VhostUserRequest {
} VhostUserRequest;

typedef enum VhostUserSlaveRequest {
VHOST_USER_SLAVE_NONE = 0,
VHOST_USER_SLAVE_IOTLB_MSG = 1,
VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
VHOST_USER_SLAVE_MAX
VHOST_USER_BACKEND_NONE = 0,
VHOST_USER_BACKEND_IOTLB_MSG = 1,
VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
VHOST_USER_BACKEND_MAX
} VhostUserSlaveRequest;

typedef struct VhostUserMemoryRegion {
Expand Down Expand Up @@ -1638,13 +1638,13 @@ static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
}

switch (hdr.request) {
case VHOST_USER_SLAVE_IOTLB_MSG:
case VHOST_USER_BACKEND_IOTLB_MSG:
ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
break;
case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG :
case VHOST_USER_BACKEND_CONFIG_CHANGE_MSG:
ret = vhost_user_slave_handle_config_change(dev);
break;
case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
case VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG:
ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
fd ? fd[0] : -1);
break;
Expand Down Expand Up @@ -1696,7 +1696,7 @@ static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
VhostUserMsg msg = {
.hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
.hdr.request = VHOST_USER_SET_BACKEND_REQ_FD,
.hdr.flags = VHOST_USER_VERSION,
};
struct vhost_user *u = dev->opaque;
Expand All @@ -1707,7 +1707,7 @@ static int vhost_setup_slave_channel(struct vhost_dev *dev)
QIOChannel *ioc;

if (!virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
VHOST_USER_PROTOCOL_F_BACKEND_REQ)) {
return 0;
}

Expand Down Expand Up @@ -2065,7 +2065,7 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,

if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
!(virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
VHOST_USER_PROTOCOL_F_BACKEND_REQ) &&
virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
error_setg(errp, "IOMMU support requires reply-ack and "
17 changes: 2 additions & 15 deletions hw/virtio/vhost-vdpa.c
Expand Up @@ -689,26 +689,11 @@ static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
return ret;
}

static void vhost_vdpa_reset_svq(struct vhost_vdpa *v)
{
if (!v->shadow_vqs_enabled) {
return;
}

for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
vhost_svq_stop(svq);
}
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
struct vhost_vdpa *v = dev->opaque;
int ret;
uint8_t status = 0;

vhost_vdpa_reset_svq(v);

ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
trace_vhost_vdpa_reset_device(dev, status);
return ret;
Expand Down Expand Up @@ -1100,6 +1085,8 @@ static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)

for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);

vhost_svq_stop(svq);
vhost_vdpa_svq_unmap_rings(dev, svq);

event_notifier_cleanup(&svq->hdev_kick);
12 changes: 6 additions & 6 deletions hw/virtio/virtio-qmp.c
Expand Up @@ -42,12 +42,12 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_RARP = 2,
VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
VHOST_USER_PROTOCOL_F_NET_MTU = 4,
VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5,
VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
VHOST_USER_PROTOCOL_F_CONFIG = 9,
VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10,
VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
Expand Down Expand Up @@ -101,8 +101,8 @@ static const qmp_virtio_feature_map_t vhost_user_protocol_map[] = {
"supported"),
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_NET_MTU, \
"VHOST_USER_PROTOCOL_F_NET_MTU: Expose host MTU to guest supported"),
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_REQ, \
"VHOST_USER_PROTOCOL_F_SLAVE_REQ: Socket fd for back-end initiated "
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_BACKEND_REQ, \
"VHOST_USER_PROTOCOL_F_BACKEND_REQ: Socket fd for back-end initiated "
"requests supported"),
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CROSS_ENDIAN, \
"VHOST_USER_PROTOCOL_F_CROSS_ENDIAN: Endianness of VQs for legacy "
Expand All @@ -116,8 +116,8 @@ static const qmp_virtio_feature_map_t vhost_user_protocol_map[] = {
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIG, \
"VHOST_USER_PROTOCOL_F_CONFIG: Vhost-user messaging for virtio "
"device configuration space supported"),
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD, \
"VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD: Slave fd communication "
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD, \
"VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD: Slave fd communication "
"channel supported"),
FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_HOST_NOTIFIER, \
"VHOST_USER_PROTOCOL_F_HOST_NOTIFIER: Host notifiers for specified "
10 changes: 10 additions & 0 deletions include/exec/memory.h
Expand Up @@ -1731,6 +1731,16 @@ void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
IOMMUTLBEvent *event);

/**
* memory_region_unmap_iommu_notifier_range: notify an UNMAP covering the
* whole IOMMU translation range of a notifier
*
* @n: the notifier to be notified
*/
void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *n);
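As a rough usage sketch (mirroring the intel_iommu.c hunk earlier in this series, not defining any new API), a caller that cannot replay MAP notifications can simply flush the range of every registered notifier; iommu_mr here stands for whatever IOMMUMemoryRegion pointer the caller already holds:

/* Sketch: tell each notifier that its whole range is now unmapped. */
IOMMUNotifier *n;

IOMMU_NOTIFIER_FOREACH(n, iommu_mr) {
    memory_region_unmap_iommu_notifier_range(n);
}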


/**
* memory_region_register_iommu_notifier: register a notifier for changes to
* IOMMU translation entries.
2 changes: 1 addition & 1 deletion include/hw/cxl/cxl_device.h
Expand Up @@ -170,7 +170,7 @@ CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MEMORY_DEVICE,
CXL_DEVICE_CAP_HDR1_OFFSET +
CXL_DEVICE_CAP_REG_SIZE * 2)

int cxl_initialize_mailbox(CXLDeviceState *cxl_dstate);
void cxl_initialize_mailbox(CXLDeviceState *cxl_dstate);
void cxl_process_mailbox(CXLDeviceState *cxl_dstate);

#define cxl_device_cap_init(dstate, reg, cap_id) \
5 changes: 2 additions & 3 deletions include/hw/i386/microvm.h
Expand Up @@ -50,9 +50,8 @@
*/

/* Platform virtio definitions */
#define VIRTIO_MMIO_BASE 0xfeb00000
#define VIRTIO_CMDLINE_MAXLEN 64
#define VIRTIO_CMDLINE_TOTAL_MAX_LEN ((VIRTIO_CMDLINE_MAXLEN + 1) * 16)
#define VIRTIO_MMIO_BASE 0xfeb00000
#define VIRTIO_CMDLINE_MAXLEN 64

#define GED_MMIO_BASE 0xfea00000
#define GED_MMIO_BASE_MEMHP (GED_MMIO_BASE + 0x100)
3 changes: 0 additions & 3 deletions include/hw/i386/pc.h
Expand Up @@ -127,9 +127,6 @@ struct PCMachineClass {

/* create kvmclock device even when KVM PV features are not exposed */
bool kvmclock_create_always;

/* skip passing an rng seed for legacy machines */
bool legacy_no_rng_seed;
};

#define TYPE_PC_MACHINE "generic-pc-machine"
3 changes: 1 addition & 2 deletions include/hw/i386/x86.h
Expand Up @@ -123,8 +123,7 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware,
void x86_load_linux(X86MachineState *x86ms,
FWCfgState *fw_cfg,
int acpi_data_size,
bool pvh_enabled,
bool legacy_no_rng_seed);
bool pvh_enabled);

bool x86_machine_is_smm_enabled(const X86MachineState *x86ms);
bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms);
31 changes: 0 additions & 31 deletions include/hw/nvram/fw_cfg.h
Expand Up @@ -117,37 +117,6 @@ struct FWCfgMemState {
*/
void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len);

/**
* fw_cfg_add_bytes_callback:
* @s: fw_cfg device being modified
* @key: selector key value for new fw_cfg item
* @select_cb: callback function when selecting
* @write_cb: callback function after a write
* @callback_opaque: argument to be passed into callback function
* @data: pointer to start of item data
* @len: size of item data
* @read_only: is file read only
*
* Add a new fw_cfg item, available by selecting the given key, as a raw
* "blob" of the given size. The data referenced by the starting pointer
* is only linked, NOT copied, into the data structure of the fw_cfg device.
*/
void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key,
FWCfgCallback select_cb,
FWCfgWriteCallback write_cb,
void *callback_opaque,
void *data, size_t len,
bool read_only);

/**
* fw_cfg_read_bytes_ptr:
* @s: fw_cfg device being modified
* @key: selector key value for new fw_cfg item
*
* Reads an existing fw_cfg data pointer.
*/
void *fw_cfg_read_bytes_ptr(FWCfgState *s, uint16_t key);

/**
* fw_cfg_add_string:
* @s: fw_cfg device being modified
8 changes: 0 additions & 8 deletions include/hw/pci/pcie.h
Expand Up @@ -27,14 +27,6 @@
#include "hw/pci/pcie_sriov.h"
#include "hw/hotplug.h"

typedef enum {
/* for attention and power indicator */
PCI_EXP_HP_IND_RESERVED = PCI_EXP_SLTCTL_IND_RESERVED,
PCI_EXP_HP_IND_ON = PCI_EXP_SLTCTL_IND_ON,
PCI_EXP_HP_IND_BLINK = PCI_EXP_SLTCTL_IND_BLINK,
PCI_EXP_HP_IND_OFF = PCI_EXP_SLTCTL_IND_OFF,
} PCIExpressIndicator;

typedef enum {
/* these bits must match the bits in Slot Control/Status registers.
* PCI_EXP_HP_EV_xxx = PCI_EXP_SLTCTL_xxxE = PCI_EXP_SLTSTA_xxx
14 changes: 0 additions & 14 deletions include/hw/pci/pcie_regs.h
Expand Up @@ -66,20 +66,6 @@ typedef enum PCIExpLinkWidth {

#define PCI_EXP_SLTCAP_PSN_SHIFT ctz32(PCI_EXP_SLTCAP_PSN)

#define PCI_EXP_SLTCTL_IND_RESERVED 0x0
#define PCI_EXP_SLTCTL_IND_ON 0x1
#define PCI_EXP_SLTCTL_IND_BLINK 0x2
#define PCI_EXP_SLTCTL_IND_OFF 0x3
#define PCI_EXP_SLTCTL_AIC_SHIFT ctz32(PCI_EXP_SLTCTL_AIC)
#define PCI_EXP_SLTCTL_AIC_OFF \
(PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_AIC_SHIFT)

#define PCI_EXP_SLTCTL_PIC_SHIFT ctz32(PCI_EXP_SLTCTL_PIC)
#define PCI_EXP_SLTCTL_PIC_OFF \
(PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_PIC_SHIFT)
#define PCI_EXP_SLTCTL_PIC_ON \
(PCI_EXP_SLTCTL_IND_ON << PCI_EXP_SLTCTL_PIC_SHIFT)

#define PCI_EXP_SLTCTL_SUPPORTED \
(PCI_EXP_SLTCTL_ABPE | \
PCI_EXP_SLTCTL_PDCE | \
2 changes: 1 addition & 1 deletion include/hw/virtio/vhost-user-gpio.h
Expand Up @@ -23,7 +23,7 @@ struct VHostUserGPIO {
VirtIODevice parent_obj;
CharBackend chardev;
struct virtio_gpio_config config;
struct vhost_virtqueue *vhost_vq;
struct vhost_virtqueue *vhost_vqs;
struct vhost_dev vhost_dev;
VhostUserState vhost_user;
VirtQueue *command_vq;
12 changes: 11 additions & 1 deletion include/qemu/bswap.h
Expand Up @@ -125,11 +125,20 @@ CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)

/*
* Same as cpu_to_le{16,32}, except that gcc will figure the result is
* Same as cpu_to_le{16,32,64}, except that gcc will figure the result is
* a compile-time constant if you pass in a constant. So this can be
* used to initialize static variables.
*/
#if HOST_BIG_ENDIAN
# define const_le64(_x) \
((((_x) & 0x00000000000000ffU) << 56) | \
(((_x) & 0x000000000000ff00U) << 40) | \
(((_x) & 0x0000000000ff0000U) << 24) | \
(((_x) & 0x00000000ff000000U) << 8) | \
(((_x) & 0x000000ff00000000U) >> 8) | \
(((_x) & 0x0000ff0000000000U) >> 24) | \
(((_x) & 0x00ff000000000000U) >> 40) | \
(((_x) & 0xff00000000000000U) >> 56))
# define const_le32(_x) \
((((_x) & 0x000000ffU) << 24) | \
(((_x) & 0x0000ff00U) << 8) | \
Expand All @@ -139,6 +148,7 @@ CPU_CONVERT(le, 64, uint64_t)
((((_x) & 0x00ff) << 8) | \
(((_x) & 0xff00) >> 8))
#else
# define const_le64(_x) (_x)
# define const_le32(_x) (_x)
# define const_le16(_x) (_x)
#endif
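A minimal sketch (not part of the patch; the struct name and values are made up) of what the comment above means by initializing static variables: because const_le64() expands to shifts and masks on a constant expression, it can appear in a static initializer where the inline cpu_to_le64() cannot.

/* Hypothetical example: a fixed header stored in little-endian byte order. */
typedef struct ExampleHeader {
    uint64_t magic;
    uint16_t version;
} ExampleHeader;

static const ExampleHeader example_hdr = {
    .magic   = const_le64(0x1122334455667788ULL),
    .version = const_le16(7),
};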
12 changes: 12 additions & 0 deletions include/qemu/uuid.h
Expand Up @@ -61,6 +61,18 @@ typedef struct {
(clock_seq_hi_and_reserved), (clock_seq_low), (node0), (node1), (node2),\
(node3), (node4), (node5) }

/* Normal (network byte order) UUID */
#define UUID(time_low, time_mid, time_hi_and_version, \
clock_seq_hi_and_reserved, clock_seq_low, node0, node1, node2, \
node3, node4, node5) \
{ ((time_low) >> 24) & 0xff, ((time_low) >> 16) & 0xff, \
((time_low) >> 8) & 0xff, (time_low) & 0xff, \
((time_mid) >> 8) & 0xff, (time_mid) & 0xff, \
((time_hi_and_version) >> 8) & 0xff, (time_hi_and_version) & 0xff, \
(clock_seq_hi_and_reserved), (clock_seq_low), \
(node0), (node1), (node2), (node3), (node4), (node5) \
}
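For contrast with the existing little-endian initializer whose tail is visible above (UUID_LE in uuid.h), a small sketch of assumed usage, not taken from this series: the new macro lays the same field values out in RFC 4122 network byte order, and the value below is arbitrary.

/* Hypothetical: a fixed QemuUUID constant in network (big-endian) byte order. */
static const QemuUUID example_uuid = {
    .data = UUID(0x12345678, 0x9abc, 0xdef0, 0x12, 0x34,
                 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0)
};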

#define UUID_FMT "%02hhx%02hhx%02hhx%02hhx-" \
"%02hhx%02hhx-%02hhx%02hhx-" \
"%02hhx%02hhx-" \
17 changes: 15 additions & 2 deletions softmmu/memory.c
Expand Up @@ -1900,6 +1900,7 @@ int memory_region_register_iommu_notifier(MemoryRegion *mr,
iommu_mr = IOMMU_MEMORY_REGION(mr);
assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
assert(n->start <= n->end);
assert(n->end <= memory_region_size(mr));
assert(n->iommu_idx >= 0 &&
n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

Expand All @@ -1923,7 +1924,6 @@ uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
MemoryRegion *mr = MEMORY_REGION(iommu_mr);
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
hwaddr addr, granularity;
IOMMUTLBEntry iotlb;
Expand All @@ -1936,7 +1936,7 @@ void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)

granularity = memory_region_iommu_get_min_page_size(iommu_mr);

for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
for (addr = n->start; addr < n->end; addr += granularity) {
iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
if (iotlb.perm != IOMMU_NONE) {
n->notify(n, &iotlb);
Expand Down Expand Up @@ -1996,6 +1996,19 @@ void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
}
}

void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *n)
{
IOMMUTLBEvent event;

event.type = IOMMU_NOTIFIER_UNMAP;
event.entry.target_as = &address_space_memory;
event.entry.iova = n->start;
event.entry.perm = IOMMU_NONE;
event.entry.addr_mask = n->end - n->start;

memory_region_notify_iommu_one(n, &event);
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
int iommu_idx,
IOMMUTLBEvent event)
27 changes: 17 additions & 10 deletions subprojects/libvhost-user/libvhost-user.c
Expand Up @@ -140,7 +140,7 @@ vu_request_to_string(unsigned int req)
REQ(VHOST_USER_SET_VRING_ENABLE),
REQ(VHOST_USER_SEND_RARP),
REQ(VHOST_USER_NET_SET_MTU),
REQ(VHOST_USER_SET_SLAVE_REQ_FD),
REQ(VHOST_USER_SET_BACKEND_REQ_FD),
REQ(VHOST_USER_IOTLB_MSG),
REQ(VHOST_USER_SET_VRING_ENDIAN),
REQ(VHOST_USER_GET_CONFIG),
Expand Down Expand Up @@ -1365,7 +1365,7 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
int qidx = vq - dev->vq;
int fd_num = 0;
VhostUserMsg vmsg = {
.request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
.request = VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG,
.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
.size = sizeof(vmsg.payload.area),
.payload.area = {
Expand All @@ -1383,7 +1383,7 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,

vmsg.fd_num = fd_num;

if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) {
return false;
}

Expand Down Expand Up @@ -1461,9 +1461,9 @@ vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
*/
uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ |
1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD |
1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD |
1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;

Expand Down Expand Up @@ -1494,7 +1494,7 @@ vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)

if (vu_has_protocol_feature(dev,
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
(!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ) ||
(!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ) ||
!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
/*
* The use case for using messages for kick/call is simulation, to make
Expand All @@ -1507,7 +1507,7 @@ vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
* that actually enables the simulation case.
*/
vu_panic(dev,
"F_IN_BAND_NOTIFICATIONS requires F_SLAVE_REQ && F_REPLY_ACK");
"F_IN_BAND_NOTIFICATIONS requires F_BACKEND_REQ && F_REPLY_ACK");
return false;
}

Expand Down Expand Up @@ -1910,7 +1910,7 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
return vu_get_queue_num_exec(dev, vmsg);
case VHOST_USER_SET_VRING_ENABLE:
return vu_set_vring_enable_exec(dev, vmsg);
case VHOST_USER_SET_SLAVE_REQ_FD:
case VHOST_USER_SET_BACKEND_REQ_FD:
return vu_set_slave_req_fd(dev, vmsg);
case VHOST_USER_GET_CONFIG:
return vu_get_config(dev, vmsg);
Expand Down Expand Up @@ -2416,9 +2416,9 @@ static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
if (vq->call_fd < 0 &&
vu_has_protocol_feature(dev,
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) {
VhostUserMsg vmsg = {
.request = VHOST_USER_SLAVE_VRING_CALL,
.request = VHOST_USER_BACKEND_VRING_CALL,
.flags = VHOST_USER_VERSION,
.size = sizeof(vmsg.payload.state),
.payload.state = {
Expand Down Expand Up @@ -2553,6 +2553,10 @@ virtqueue_alloc_element(size_t sz,

assert(sz >= sizeof(VuVirtqElement));
elem = malloc(out_sg_end);
if (!elem) {
DPRINT("%s: failed to malloc virtqueue element\n", __func__);
return NULL;
}
elem->out_num = out_num;
elem->in_num = in_num;
elem->in_sg = (void *)elem + in_sg_ofs;
Expand Down Expand Up @@ -2639,6 +2643,9 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)

/* Now copy what we have collected and mapped */
elem = virtqueue_alloc_element(sz, out_num, in_num);
if (!elem) {
return NULL;
}
elem->index = idx;
for (i = 0; i < out_num; i++) {
elem->out_sg[i] = iov[i];
20 changes: 10 additions & 10 deletions subprojects/libvhost-user/libvhost-user.h
Expand Up @@ -54,12 +54,12 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_RARP = 2,
VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
VHOST_USER_PROTOCOL_F_NET_MTU = 4,
VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5,
VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
VHOST_USER_PROTOCOL_F_CONFIG = 9,
VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10,
VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
Expand Down Expand Up @@ -92,7 +92,7 @@ typedef enum VhostUserRequest {
VHOST_USER_SET_VRING_ENABLE = 18,
VHOST_USER_SEND_RARP = 19,
VHOST_USER_NET_SET_MTU = 20,
VHOST_USER_SET_SLAVE_REQ_FD = 21,
VHOST_USER_SET_BACKEND_REQ_FD = 21,
VHOST_USER_IOTLB_MSG = 22,
VHOST_USER_SET_VRING_ENDIAN = 23,
VHOST_USER_GET_CONFIG = 24,
Expand All @@ -113,13 +113,13 @@ typedef enum VhostUserRequest {
} VhostUserRequest;

typedef enum VhostUserSlaveRequest {
VHOST_USER_SLAVE_NONE = 0,
VHOST_USER_SLAVE_IOTLB_MSG = 1,
VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
VHOST_USER_SLAVE_VRING_CALL = 4,
VHOST_USER_SLAVE_VRING_ERR = 5,
VHOST_USER_SLAVE_MAX
VHOST_USER_BACKEND_NONE = 0,
VHOST_USER_BACKEND_IOTLB_MSG = 1,
VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
VHOST_USER_BACKEND_VRING_CALL = 4,
VHOST_USER_BACKEND_VRING_ERR = 5,
VHOST_USER_BACKEND_MAX
} VhostUserSlaveRequest;

typedef struct VhostUserMemoryRegion {
Binary file modified tests/data/acpi/q35/DSDT.cxl
Binary file removed tests/data/acpi/virt/APIC.memhp
Binary file removed tests/data/acpi/virt/APIC.numamem
Binary file removed tests/data/acpi/virt/DSDT.numamem
Binary file removed tests/data/acpi/virt/FACP.memhp
Binary file removed tests/data/acpi/virt/FACP.numamem
Binary file removed tests/data/acpi/virt/GTDT.memhp
Binary file removed tests/data/acpi/virt/GTDT.numamem
Binary file removed tests/data/acpi/virt/IORT.memhp
Binary file removed tests/data/acpi/virt/IORT.numamem
Binary file removed tests/data/acpi/virt/IORT.pxb
Binary file removed tests/data/acpi/virt/MCFG.memhp
Binary file removed tests/data/acpi/virt/MCFG.numamem
Binary file removed tests/data/acpi/virt/SPCR.memhp
Binary file removed tests/data/acpi/virt/SPCR.numamem