Showing with 250 additions and 76 deletions.
  1. +1 −1 configs/targets/s390x-linux-user.mak
  2. +1 −1 configs/targets/s390x-softmmu.mak
  3. +7 −0 docs/devel/multiple-iothreads.txt
  4. +14 −0 gdb-xml/s390-virt-kvm.xml
  5. +0 −4 gdb-xml/s390-virt.xml
  6. +4 −1 hw/9pfs/xen-9p-backend.c
  7. +2 −1 hw/block/dataplane/virtio-blk.c
  8. +3 −2 hw/block/dataplane/xen-block.c
  9. +2 −1 hw/char/virtio-serial-bus.c
  10. +6 −3 hw/display/qxl.c
  11. +4 −2 hw/display/virtio-gpu.c
  12. +2 −1 hw/ide/ahci.c
  13. +1 −0 hw/ide/ahci_internal.h
  14. +3 −1 hw/ide/core.c
  15. +7 −0 hw/intc/apic.c
  16. +7 −0 hw/misc/bcm2835_property.c
  17. +4 −2 hw/misc/imx_rngc.c
  18. +1 −1 hw/misc/macio/mac_dbdma.c
  19. +2 −1 hw/net/virtio-net.c
  20. +4 −2 hw/nvme/ctrl.c
  21. +7 −0 hw/pci-host/raven.c
  22. +1 −1 hw/rdma/Kconfig
  23. +5 −3 hw/rdma/meson.build
  24. +0 −4 hw/rdma/rdma_rm.c
  25. +6 −0 hw/scsi/lsi53c895a.c
  26. +2 −1 hw/scsi/mptsas.c
  27. +2 −1 hw/scsi/scsi-bus.c
  28. +2 −1 hw/scsi/vmw_pvscsi.c
  29. +2 −1 hw/usb/dev-uas.c
  30. +2 −1 hw/usb/hcd-dwc2.c
  31. +2 −1 hw/usb/hcd-ehci.c
  32. +1 −1 hw/usb/hcd-uhci.c
  33. +4 −2 hw/usb/host-libusb.c
  34. +4 −2 hw/usb/redirect.c
  35. +2 −1 hw/usb/xen-usb.c
  36. +3 −2 hw/virtio/virtio-balloon.c
  37. +2 −1 hw/virtio/virtio-crypto.c
  38. +16 −2 include/block/aio.h
  39. +5 −0 include/exec/memory.h
  40. +7 −0 include/hw/qdev-core.h
  41. +5 −2 include/qemu/main-loop.h
  42. +8 −0 scripts/checkpatch.pl
  43. +16 −0 softmmu/memory.c
  44. +45 −20 target/s390x/gdbstub.c
  45. +2 −1 tests/qtest/vhost-user-test.c
  46. +2 −1 tests/unit/ptimer-test-stubs.c
  47. +17 −1 util/async.c
  48. +4 −2 util/main-loop.c
  49. +1 −0 util/trace-events
2 changes: 1 addition & 1 deletion configs/targets/s390x-linux-user.mak
Expand Up @@ -2,4 +2,4 @@ TARGET_ARCH=s390x
TARGET_SYSTBL_ABI=common,64
TARGET_SYSTBL=syscall.tbl
TARGET_BIG_ENDIAN=y
TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-gs.xml
TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-virt-kvm.xml gdb-xml/s390-gs.xml
2 changes: 1 addition & 1 deletion configs/targets/s390x-softmmu.mak
@@ -1,4 +1,4 @@
TARGET_ARCH=s390x
TARGET_BIG_ENDIAN=y
TARGET_SUPPORTS_MTTCG=y
TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-gs.xml
TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-virt-kvm.xml gdb-xml/s390-gs.xml
7 changes: 7 additions & 0 deletions docs/devel/multiple-iothreads.txt
Expand Up @@ -61,6 +61,7 @@ There are several old APIs that use the main loop AioContext:
* LEGACY qemu_aio_set_event_notifier() - monitor an event notifier
* LEGACY timer_new_ms() - create a timer
* LEGACY qemu_bh_new() - create a BH
* LEGACY qemu_bh_new_guarded() - create a BH with a device re-entrancy guard
* LEGACY qemu_aio_wait() - run an event loop iteration

Since they implicitly work on the main loop they cannot be used in code that
Expand All @@ -72,8 +73,14 @@ Instead, use the AioContext functions directly (see include/block/aio.h):
* aio_set_event_notifier() - monitor an event notifier
* aio_timer_new() - create a timer
* aio_bh_new() - create a BH
* aio_bh_new_guarded() - create a BH with a device re-entrancy guard
* aio_poll() - run an event loop iteration

The qemu_bh_new_guarded/aio_bh_new_guarded APIs accept a "MemReentrancyGuard"
argument, which is used to check for and prevent re-entrancy problems. For
BHs associated with devices, the reentrancy-guard is contained in the
corresponding DeviceState and named "mem_reentrancy_guard".

The AioContext can be obtained from the IOThread using
iothread_get_aio_context() or for the main loop using qemu_get_aio_context().
Code that takes an AioContext argument works both in IOThreads or the main
Expand Down
14 changes: 14 additions & 0 deletions gdb-xml/s390-virt-kvm.xml
@@ -0,0 +1,14 @@
<?xml version="1.0"?>
<!-- Copyright 2023 IBM Corp.
This work is licensed under the terms of the GNU GPL, version 2 or
(at your option) any later version. See the COPYING file in the
top-level directory. -->

<!DOCTYPE feature SYSTEM "gdb-target.dtd">
<feature name="org.gnu.gdb.s390.virt.kvm">
<reg name="pp" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_token" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_select" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_compare" bitsize="64" type="uint64" group="system"/>
</feature>
4 changes: 0 additions & 4 deletions gdb-xml/s390-virt.xml
Expand Up @@ -11,8 +11,4 @@
<reg name="cputm" bitsize="64" type="uint64" group="system"/>
<reg name="last_break" bitsize="64" type="code_ptr" group="system"/>
<reg name="prefix" bitsize="64" type="data_ptr" group="system"/>
<reg name="pp" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_token" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_select" bitsize="64" type="uint64" group="system"/>
<reg name="pfault_compare" bitsize="64" type="uint64" group="system"/>
</feature>
5 changes: 4 additions & 1 deletion hw/9pfs/xen-9p-backend.c
Expand Up @@ -61,6 +61,7 @@ typedef struct Xen9pfsDev {

int num_rings;
Xen9pfsRing *rings;
MemReentrancyGuard mem_reentrancy_guard;
} Xen9pfsDev;

static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev);
Expand Down Expand Up @@ -443,7 +444,9 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev)
xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data +
XEN_FLEX_RING_SIZE(ring_order);

xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]);
xen_9pdev->rings[i].bh = qemu_bh_new_guarded(xen_9pfs_bh,
&xen_9pdev->rings[i],
&xen_9pdev->mem_reentrancy_guard);
xen_9pdev->rings[i].out_cons = 0;
xen_9pdev->rings[i].out_size = 0;
xen_9pdev->rings[i].inprogress = false;
Expand Down
3 changes: 2 additions & 1 deletion hw/block/dataplane/virtio-blk.c
Expand Up @@ -127,7 +127,8 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
} else {
s->ctx = qemu_get_aio_context();
}
s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
s->bh = aio_bh_new_guarded(s->ctx, notify_guest_bh, s,
&DEVICE(vdev)->mem_reentrancy_guard);
s->batch_notify_vqs = bitmap_new(conf->num_queues);

*dataplane = s;
Expand Down
5 changes: 3 additions & 2 deletions hw/block/dataplane/xen-block.c
Expand Up @@ -633,8 +633,9 @@ XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
} else {
dataplane->ctx = qemu_get_aio_context();
}
dataplane->bh = aio_bh_new(dataplane->ctx, xen_block_dataplane_bh,
dataplane);
dataplane->bh = aio_bh_new_guarded(dataplane->ctx, xen_block_dataplane_bh,
dataplane,
&DEVICE(xendev)->mem_reentrancy_guard);

return dataplane;
}
Expand Down
3 changes: 2 additions & 1 deletion hw/char/virtio-serial-bus.c
Expand Up @@ -985,7 +985,8 @@ static void virtser_port_device_realize(DeviceState *dev, Error **errp)
return;
}

port->bh = qemu_bh_new(flush_queued_data_bh, port);
port->bh = qemu_bh_new_guarded(flush_queued_data_bh, port,
&dev->mem_reentrancy_guard);
port->elem = NULL;
}

Expand Down
9 changes: 6 additions & 3 deletions hw/display/qxl.c
Expand Up @@ -2201,11 +2201,14 @@ static void qxl_realize_common(PCIQXLDevice *qxl, Error **errp)

qemu_add_vm_change_state_handler(qxl_vm_change_state_handler, qxl);

qxl->update_irq = qemu_bh_new(qxl_update_irq_bh, qxl);
qxl->update_irq = qemu_bh_new_guarded(qxl_update_irq_bh, qxl,
&DEVICE(qxl)->mem_reentrancy_guard);
qxl_reset_state(qxl);

qxl->update_area_bh = qemu_bh_new(qxl_render_update_area_bh, qxl);
qxl->ssd.cursor_bh = qemu_bh_new(qemu_spice_cursor_refresh_bh, &qxl->ssd);
qxl->update_area_bh = qemu_bh_new_guarded(qxl_render_update_area_bh, qxl,
&DEVICE(qxl)->mem_reentrancy_guard);
qxl->ssd.cursor_bh = qemu_bh_new_guarded(qemu_spice_cursor_refresh_bh, &qxl->ssd,
&DEVICE(qxl)->mem_reentrancy_guard);
}

static void qxl_realize_primary(PCIDevice *dev, Error **errp)
Expand Down
6 changes: 4 additions & 2 deletions hw/display/virtio-gpu.c
Expand Up @@ -1339,8 +1339,10 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)

g->ctrl_vq = virtio_get_queue(vdev, 0);
g->cursor_vq = virtio_get_queue(vdev, 1);
g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
&qdev->mem_reentrancy_guard);
g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
&qdev->mem_reentrancy_guard);
QTAILQ_INIT(&g->reslist);
QTAILQ_INIT(&g->cmdq);
QTAILQ_INIT(&g->fenceq);
Expand Down
3 changes: 2 additions & 1 deletion hw/ide/ahci.c
Expand Up @@ -1509,7 +1509,8 @@ static void ahci_cmd_done(const IDEDMA *dma)
ahci_write_fis_d2h(ad);

if (ad->port_regs.cmd_issue && !ad->check_bh) {
ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad);
ad->check_bh = qemu_bh_new_guarded(ahci_check_cmd_bh, ad,
&ad->mem_reentrancy_guard);
qemu_bh_schedule(ad->check_bh);
}
}
Expand Down
1 change: 1 addition & 0 deletions hw/ide/ahci_internal.h
Expand Up @@ -321,6 +321,7 @@ struct AHCIDevice {
bool init_d2h_sent;
AHCICmdHdr *cur_cmd;
NCQTransferState ncq_tfs[AHCI_MAX_CMDS];
MemReentrancyGuard mem_reentrancy_guard;
};

struct AHCIPCIState {
Expand Down
4 changes: 3 additions & 1 deletion hw/ide/core.c
Expand Up @@ -513,14 +513,16 @@ BlockAIOCB *ide_issue_trim(
BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
{
IDEState *s = opaque;
IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
TrimAIOCB *iocb;

/* Paired with a decrement in ide_trim_bh_cb() */
blk_inc_in_flight(s->blk);

iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
iocb->s = s;
iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
iocb->bh = qemu_bh_new_guarded(ide_trim_bh_cb, iocb,
&DEVICE(dev)->mem_reentrancy_guard);
iocb->ret = 0;
iocb->qiov = qiov;
iocb->i = -1;
Expand Down
7 changes: 7 additions & 0 deletions hw/intc/apic.c
Expand Up @@ -885,6 +885,13 @@ static void apic_realize(DeviceState *dev, Error **errp)
memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
APIC_SPACE_SIZE);

/*
* apic-msi's apic_mem_write can call into ioapic_eoi_broadcast, which can
* write back to apic-msi. As such mark the apic-msi region re-entrancy
* safe.
*/
s->io_memory.disable_reentrancy_guard = true;

s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
local_apics[s->id] = s;

Expand Down
7 changes: 7 additions & 0 deletions hw/misc/bcm2835_property.c
Expand Up @@ -382,6 +382,13 @@ static void bcm2835_property_init(Object *obj)

memory_region_init_io(&s->iomem, OBJECT(s), &bcm2835_property_ops, s,
TYPE_BCM2835_PROPERTY, 0x10);

/*
 * bcm2835_property_ops call into bcm2835_mbox, which in turn reads from
 * iomem. As such, mark iomem as re-entrancy safe.
*/
s->iomem.disable_reentrancy_guard = true;

sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
sysbus_init_irq(SYS_BUS_DEVICE(s), &s->mbox_irq);
}
Expand Down
6 changes: 4 additions & 2 deletions hw/misc/imx_rngc.c
Expand Up @@ -228,8 +228,10 @@ static void imx_rngc_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->iomem);

sysbus_init_irq(sbd, &s->irq);
s->self_test_bh = qemu_bh_new(imx_rngc_self_test, s);
s->seed_bh = qemu_bh_new(imx_rngc_seed, s);
s->self_test_bh = qemu_bh_new_guarded(imx_rngc_self_test, s,
&dev->mem_reentrancy_guard);
s->seed_bh = qemu_bh_new_guarded(imx_rngc_seed, s,
&dev->mem_reentrancy_guard);
}

static void imx_rngc_reset(DeviceState *dev)
Expand Down
2 changes: 1 addition & 1 deletion hw/misc/macio/mac_dbdma.c
Expand Up @@ -914,7 +914,7 @@ static void mac_dbdma_realize(DeviceState *dev, Error **errp)
{
DBDMAState *s = MAC_DBDMA(dev);

s->bh = qemu_bh_new(DBDMA_run_bh, s);
s->bh = qemu_bh_new_guarded(DBDMA_run_bh, s, &dev->mem_reentrancy_guard);
}

static void mac_dbdma_class_init(ObjectClass *oc, void *data)
Expand Down
3 changes: 2 additions & 1 deletion hw/net/virtio-net.c
Expand Up @@ -2917,7 +2917,8 @@ static void virtio_net_add_queue(VirtIONet *n, int index)
n->vqs[index].tx_vq =
virtio_add_queue(vdev, n->net_conf.tx_queue_size,
virtio_net_handle_tx_bh);
n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
n->vqs[index].tx_bh = qemu_bh_new_guarded(virtio_net_tx_bh, &n->vqs[index],
&DEVICE(vdev)->mem_reentrancy_guard);
}

n->vqs[index].tx_waiting = 0;
Expand Down
6 changes: 4 additions & 2 deletions hw/nvme/ctrl.c
Expand Up @@ -4607,7 +4607,8 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
}

sq->bh = qemu_bh_new(nvme_process_sq, sq);
sq->bh = qemu_bh_new_guarded(nvme_process_sq, sq,
&DEVICE(sq->ctrl)->mem_reentrancy_guard);

if (n->dbbuf_enabled) {
sq->db_addr = n->dbbuf_dbs + (sqid << 3);
Expand Down Expand Up @@ -5253,7 +5254,8 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
}
}
n->cq[cqid] = cq;
cq->bh = qemu_bh_new(nvme_post_cqes, cq);
cq->bh = qemu_bh_new_guarded(nvme_post_cqes, cq,
&DEVICE(cq->ctrl)->mem_reentrancy_guard);
}

static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
Expand Down
7 changes: 7 additions & 0 deletions hw/pci-host/raven.c
Expand Up @@ -294,6 +294,13 @@ static void raven_pcihost_initfn(Object *obj)
memory_region_init(&s->pci_memory, obj, "pci-memory", 0x3f000000);
address_space_init(&s->pci_io_as, &s->pci_io, "raven-io");

/*
 * Raven's raven_io_ops use the address-space API to access pci-conf-idx
 * (which is also owned by the raven device). As such, mark the
 * pci_io_non_contiguous region as re-entrancy safe.
 */
s->pci_io_non_contiguous.disable_reentrancy_guard = true;

/* CPU address space */
memory_region_add_subregion(address_space_mem, PCI_IO_BASE_ADDR,
&s->pci_io);
Expand Down
2 changes: 1 addition & 1 deletion hw/rdma/Kconfig
@@ -1,3 +1,3 @@
config VMW_PVRDMA
default y if PCI_DEVICES
depends on PVRDMA && PCI && MSI_NONBROKEN
depends on PVRDMA && MSI_NONBROKEN && VMXNET3_PCI
8 changes: 5 additions & 3 deletions hw/rdma/meson.build
@@ -1,10 +1,12 @@
specific_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files(
softmmu_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files(
'rdma.c',
'rdma_backend.c',
'rdma_rm.c',
'rdma_utils.c',
'vmw/pvrdma_qp_ops.c',
))
specific_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files(
'rdma_rm.c',
'vmw/pvrdma_cmd.c',
'vmw/pvrdma_dev_ring.c',
'vmw/pvrdma_main.c',
'vmw/pvrdma_qp_ops.c',
))
4 changes: 0 additions & 4 deletions hw/rdma/rdma_rm.c
Expand Up @@ -23,10 +23,6 @@
#include "rdma_backend.h"
#include "rdma_rm.h"

/* Page directory and page tables */
#define PG_DIR_SZ { TARGET_PAGE_SIZE / sizeof(__u64) }
#define PG_TBL_SZ { TARGET_PAGE_SIZE / sizeof(__u64) }

void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf)
{
g_string_append_printf(buf, "\ttx : %" PRId64 "\n",
Expand Down
6 changes: 6 additions & 0 deletions hw/scsi/lsi53c895a.c
Expand Up @@ -2302,6 +2302,12 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp)
memory_region_init_io(&s->io_io, OBJECT(s), &lsi_io_ops, s,
"lsi-io", 256);

/*
* Since we use the address-space API to interact with ram_io, disable the
* re-entrancy guard.
*/
s->ram_io.disable_reentrancy_guard = true;

address_space_init(&s->pci_io_as, pci_address_space_io(dev), "lsi-pci-io");
qdev_init_gpio_out(d, &s->ext_irq, 1);

Expand Down
3 changes: 2 additions & 1 deletion hw/scsi/mptsas.c
Expand Up @@ -1322,7 +1322,8 @@ static void mptsas_scsi_realize(PCIDevice *dev, Error **errp)
}
s->max_devices = MPTSAS_NUM_PORTS;

s->request_bh = qemu_bh_new(mptsas_fetch_requests, s);
s->request_bh = qemu_bh_new_guarded(mptsas_fetch_requests, s,
&DEVICE(dev)->mem_reentrancy_guard);

scsi_bus_init(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info);
}
Expand Down
3 changes: 2 additions & 1 deletion hw/scsi/scsi-bus.c
Expand Up @@ -193,7 +193,8 @@ static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
AioContext *ctx = blk_get_aio_context(s->conf.blk);
/* The reference is dropped in scsi_dma_restart_bh.*/
object_ref(OBJECT(s));
s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s);
s->bh = aio_bh_new_guarded(ctx, scsi_dma_restart_bh, s,
&DEVICE(s)->mem_reentrancy_guard);
qemu_bh_schedule(s->bh);
}
}
Expand Down
3 changes: 2 additions & 1 deletion hw/scsi/vmw_pvscsi.c
Expand Up @@ -1184,7 +1184,8 @@ pvscsi_realizefn(PCIDevice *pci_dev, Error **errp)
pcie_endpoint_cap_init(pci_dev, PVSCSI_EXP_EP_OFFSET);
}

s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s);
s->completion_worker = qemu_bh_new_guarded(pvscsi_process_completion_queue, s,
&DEVICE(pci_dev)->mem_reentrancy_guard);

scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info);
/* override default SCSI bus hotplug-handler, with pvscsi's one */
Expand Down
3 changes: 2 additions & 1 deletion hw/usb/dev-uas.c
Expand Up @@ -937,7 +937,8 @@ static void usb_uas_realize(USBDevice *dev, Error **errp)

QTAILQ_INIT(&uas->results);
QTAILQ_INIT(&uas->requests);
uas->status_bh = qemu_bh_new(usb_uas_send_status_bh, uas);
uas->status_bh = qemu_bh_new_guarded(usb_uas_send_status_bh, uas,
&d->mem_reentrancy_guard);

dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE);
scsi_bus_init(&uas->bus, sizeof(uas->bus), DEVICE(dev), &usb_uas_scsi_info);
Expand Down
3 changes: 2 additions & 1 deletion hw/usb/hcd-dwc2.c
Expand Up @@ -1364,7 +1364,8 @@ static void dwc2_realize(DeviceState *dev, Error **errp)
s->fi = USB_FRMINTVL - 1;
s->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_frame_boundary, s);
s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_work_timer, s);
s->async_bh = qemu_bh_new(dwc2_work_bh, s);
s->async_bh = qemu_bh_new_guarded(dwc2_work_bh, s,
&dev->mem_reentrancy_guard);

sysbus_init_irq(sbd, &s->irq);
}
Expand Down
3 changes: 2 additions & 1 deletion hw/usb/hcd-ehci.c
Expand Up @@ -2533,7 +2533,8 @@ void usb_ehci_realize(EHCIState *s, DeviceState *dev, Error **errp)
}

s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ehci_work_timer, s);
s->async_bh = qemu_bh_new(ehci_work_bh, s);
s->async_bh = qemu_bh_new_guarded(ehci_work_bh, s,
&dev->mem_reentrancy_guard);
s->device = dev;

s->vmstate = qemu_add_vm_change_state_handler(usb_ehci_vm_state_change, s);
Expand Down
2 changes: 1 addition & 1 deletion hw/usb/hcd-uhci.c
Expand Up @@ -1190,7 +1190,7 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp)
USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
}
}
s->bh = qemu_bh_new(uhci_bh, s);
s->bh = qemu_bh_new_guarded(uhci_bh, s, &DEVICE(dev)->mem_reentrancy_guard);
s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, uhci_frame_timer, s);
s->num_ports_vmstate = NB_PORTS;
QTAILQ_INIT(&s->queues);
Expand Down