async: Add an optional reentrancy guard to the BH API

Devices can pass their MemReentrancyGuard (from their DeviceState) when
creating new BHs. The async API then toggles the guard before and after
invoking the BH callback. This prevents BH->MMIO reentrancy issues.

Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Message-Id: <20230427211013.2994127-3-alxndr@bu.edu>
[thuth: Fix "line over 90 characters" checkpatch.pl error]
Signed-off-by: Thomas Huth <thuth@redhat.com>
a1xndr authored and huth committed Apr 28, 2023
1 parent a2e1753 commit 9c86c97
Showing 7 changed files with 52 additions and 8 deletions.
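
To illustrate the intended usage, here is a minimal sketch of a device
opting in to the guarded API. MyDeviceState, my_bh_cb and
my_device_realize are hypothetical names; DeviceState's
mem_reentrancy_guard field comes from the parent commit.

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/qdev-core.h"

typedef struct MyDeviceState {
    DeviceState parent_obj;
    QEMUBH *bh;
} MyDeviceState;

static void my_bh_cb(void *opaque)
{
    /* Deferred device work runs here, in the main loop. */
}

static void my_device_realize(MyDeviceState *s)
{
    /* Before this patch: s->bh = qemu_bh_new(my_bh_cb, s);
     * Now the device passes the guard embedded in its DeviceState,
     * so the callback is flagged while it runs and re-entry into the
     * device's MMIO handlers can be detected and blocked.
     */
    s->bh = qemu_bh_new_guarded(my_bh_cb, s,
                                &DEVICE(s)->mem_reentrancy_guard);
}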
7 changes: 7 additions & 0 deletions docs/devel/multiple-iothreads.txt
@@ -61,6 +61,7 @@ There are several old APIs that use the main loop AioContext:
 * LEGACY qemu_aio_set_event_notifier() - monitor an event notifier
 * LEGACY timer_new_ms() - create a timer
 * LEGACY qemu_bh_new() - create a BH
+* LEGACY qemu_bh_new_guarded() - create a BH with a device re-entrancy guard
 * LEGACY qemu_aio_wait() - run an event loop iteration
 
 Since they implicitly work on the main loop they cannot be used in code that
@@ -72,8 +73,14 @@ Instead, use the AioContext functions directly (see include/block/aio.h):
 * aio_set_event_notifier() - monitor an event notifier
 * aio_timer_new() - create a timer
 * aio_bh_new() - create a BH
+* aio_bh_new_guarded() - create a BH with a device re-entrancy guard
 * aio_poll() - run an event loop iteration
 
+The qemu_bh_new_guarded/aio_bh_new_guarded APIs accept a "MemReentrancyGuard"
+argument, which is used to check for and prevent re-entrancy problems. For
+BHs associated with devices, the reentrancy-guard is contained in the
+corresponding DeviceState and named "mem_reentrancy_guard".
+
 The AioContext can be obtained from the IOThread using
 iothread_get_aio_context() or for the main loop using qemu_get_aio_context().
 Code that takes an AioContext argument works both in IOThreads or the main
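
A sketch of the documented pairing with an explicit AioContext, as the
text above recommends for IOThread code. create_guarded_bh and its
parameters are hypothetical; iothread_get_aio_context() is the accessor
the documentation mentions.

#include "qemu/osdep.h"
#include "block/aio.h"
#include "hw/qdev-core.h"
#include "sysemu/iothread.h"

/* Create a BH on an IOThread's AioContext instead of the main loop,
 * still guarded by the device's MemReentrancyGuard. */
static QEMUBH *create_guarded_bh(IOThread *iothread, QEMUBHFunc *cb,
                                 DeviceState *dev)
{
    AioContext *ctx = iothread_get_aio_context(iothread);

    return aio_bh_new_guarded(ctx, cb, dev, &dev->mem_reentrancy_guard);
}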
18 changes: 16 additions & 2 deletions include/block/aio.h
@@ -23,6 +23,8 @@
 #include "qemu/thread.h"
 #include "qemu/timer.h"
 #include "block/graph-lock.h"
+#include "hw/qdev-core.h"
+
 
 typedef struct BlockAIOCB BlockAIOCB;
 typedef void BlockCompletionFunc(void *opaque, int ret);
@@ -323,9 +325,11 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
  * is opaque and must be allocated prior to its use.
  *
  * @name: A human-readable identifier for debugging purposes.
+ * @reentrancy_guard: A guard set when entering a cb to prevent
+ * device-reentrancy issues
  */
 QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
-                        const char *name);
+                        const char *name, MemReentrancyGuard *reentrancy_guard);
 
 /**
  * aio_bh_new: Allocate a new bottom half structure
@@ -334,7 +338,17 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
  * string.
  */
 #define aio_bh_new(ctx, cb, opaque) \
-    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)))
+    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)
+
+/**
+ * aio_bh_new_guarded: Allocate a new bottom half structure with a
+ * reentrancy_guard
+ *
+ * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
+ * string.
+ */
+#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
+    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)
 
 /**
  * aio_notify: Force processing of pending events.
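
Both convenience macros bottom out in aio_bh_new_full(); only the guard
argument differs. A hypothetical expansion sketch (my_cb and
show_expansion are made-up names):

#include "qemu/osdep.h"
#include "block/aio.h"
#include "hw/qdev-core.h"

static void my_cb(void *opaque)
{
}

static void show_expansion(AioContext *ctx, DeviceState *dev)
{
    /* aio_bh_new(ctx, my_cb, dev) expands to: */
    QEMUBH *plain = aio_bh_new_full(ctx, my_cb, dev, "my_cb", NULL);

    /* aio_bh_new_guarded(ctx, my_cb, dev, &dev->mem_reentrancy_guard)
     * expands to: */
    QEMUBH *guarded = aio_bh_new_full(ctx, my_cb, dev, "my_cb",
                                      &dev->mem_reentrancy_guard);

    qemu_bh_delete(plain);
    qemu_bh_delete(guarded);
}

Passing NULL keeps every existing aio_bh_new() caller working unchanged,
which is why the guard could be added as an optional argument.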
7 changes: 5 additions & 2 deletions include/qemu/main-loop.h
@@ -387,9 +387,12 @@ void qemu_cond_timedwait_iothread(QemuCond *cond, int ms);
 
 /* internal interfaces */
 
+#define qemu_bh_new_guarded(cb, opaque, guard) \
+    qemu_bh_new_full((cb), (opaque), (stringify(cb)), guard)
 #define qemu_bh_new(cb, opaque) \
-    qemu_bh_new_full((cb), (opaque), (stringify(cb)))
-QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name);
+    qemu_bh_new_full((cb), (opaque), (stringify(cb)), NULL)
+QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
+                         MemReentrancyGuard *reentrancy_guard);
 void qemu_bh_schedule_idle(QEMUBH *bh);
 
 enum {
3 changes: 2 additions & 1 deletion tests/unit/ptimer-test-stubs.c
@@ -107,7 +107,8 @@ int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask)
     return deadline;
 }
 
-QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name)
+QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
+                         MemReentrancyGuard *reentrancy_guard)
 {
     QEMUBH *bh = g_new(QEMUBH, 1);
 
18 changes: 17 additions & 1 deletion util/async.c
@@ -65,6 +65,7 @@ struct QEMUBH {
     void *opaque;
     QSLIST_ENTRY(QEMUBH) next;
     unsigned flags;
+    MemReentrancyGuard *reentrancy_guard;
 };
 
 /* Called concurrently from any thread */
@@ -137,7 +138,7 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
 }
 
 QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
-                        const char *name)
+                        const char *name, MemReentrancyGuard *reentrancy_guard)
 {
     QEMUBH *bh;
     bh = g_new(QEMUBH, 1);
@@ -146,13 +147,28 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
         .cb = cb,
         .opaque = opaque,
         .name = name,
+        .reentrancy_guard = reentrancy_guard,
     };
     return bh;
 }
 
 void aio_bh_call(QEMUBH *bh)
 {
+    bool last_engaged_in_io = false;
+
+    if (bh->reentrancy_guard) {
+        last_engaged_in_io = bh->reentrancy_guard->engaged_in_io;
+        if (bh->reentrancy_guard->engaged_in_io) {
+            trace_reentrant_aio(bh->ctx, bh->name);
+        }
+        bh->reentrancy_guard->engaged_in_io = true;
+    }
+
     bh->cb(bh->opaque);
+
+    if (bh->reentrancy_guard) {
+        bh->reentrancy_guard->engaged_in_io = last_engaged_in_io;
+    }
 }
 
 /* Multiple occurrences of aio_bh_poll cannot be called concurrently. */
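
The save/set/restore pattern in aio_bh_call() above can be modeled
outside QEMU. This standalone sketch (plain C, all names hypothetical)
shows why a nested guarded call is detected while the flag still unwinds
correctly; in QEMU the detection point emits trace_reentrant_aio(), and
engaged_in_io is the flag the parent commit's memory-subsystem check
consults to block re-entrant MMIO.

#include <stdbool.h>
#include <stdio.h>

typedef struct MemReentrancyGuard {
    bool engaged_in_io;
} MemReentrancyGuard;

static void call_guarded(MemReentrancyGuard *guard,
                         void (*cb)(MemReentrancyGuard *))
{
    bool last_engaged_in_io = guard->engaged_in_io;

    if (guard->engaged_in_io) {
        printf("re-entrant call detected\n"); /* QEMU: trace_reentrant_aio() */
    }
    guard->engaged_in_io = true;

    cb(guard);

    /* Restore rather than clear, so nested calls unwind correctly. */
    guard->engaged_in_io = last_engaged_in_io;
}

static void inner_cb(MemReentrancyGuard *guard)
{
}

static void outer_cb(MemReentrancyGuard *guard)
{
    /* A callback that re-enters the guarded path: the nested call
     * sees engaged_in_io == true and reports it. */
    call_guarded(guard, inner_cb);
}

int main(void)
{
    MemReentrancyGuard guard = { .engaged_in_io = false };

    call_guarded(&guard, outer_cb); /* prints the message exactly once */
    return 0;
}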
6 changes: 4 additions & 2 deletions util/main-loop.c
@@ -605,9 +605,11 @@ void main_loop_wait(int nonblocking)
 
 /* Functions to operate on the main QEMU AioContext. */
 
-QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name)
+QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name,
+                         MemReentrancyGuard *reentrancy_guard)
 {
-    return aio_bh_new_full(qemu_aio_context, cb, opaque, name);
+    return aio_bh_new_full(qemu_aio_context, cb, opaque, name,
+                           reentrancy_guard);
 }
 
 /*
1 change: 1 addition & 0 deletions util/trace-events
@@ -11,6 +11,7 @@ poll_remove(void *ctx, void *node, int fd) "ctx %p node %p fd %d"
 # async.c
 aio_co_schedule(void *ctx, void *co) "ctx %p co %p"
 aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p"
+reentrant_aio(void *ctx, const char *name) "ctx %p name %s"
 
 # thread-pool.c
 thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
