Commit e95205e

dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel

If the DMA's owning thread cancels the I/O while the bounce buffer's owning thread
is notifying the "cpu client list", a use-after-free happens:

     continue_after_map_failure               dma_aio_cancel
     ------------------------------------------------------------------
     aio_bh_new
                                              qemu_bh_delete
     qemu_bh_schedule (use after free)

Also, the old code doesn't run the bh in the right AioContext.

Fix both problems by passing a QEMUBH to cpu_register_map_client.

Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1426496617-10702-6-git-send-email-famz@redhat.com>
[Remove unnecessary forward declaration. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Fam Zheng authored and bonzini committed Apr 27, 2015
1 parent 33b6c2e commit e95205e
Showing 3 changed files with 31 additions and 23 deletions.
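
In outline, the fix moves BH creation out of the notification path: the DMA code creates the BH up front, in the AioContext of its own BlockBackend, and registration hands exec.c nothing but the BH pointer. A condensed sketch of the new registration path, pieced together from the dma-helpers.c hunks below (the surrounding state machine and error handling are elided):

    /* In dma_blk_cb(), when cpu_physical_memory_map() found no bounce
     * buffer free: create the BH in the BlockBackend's own AioContext,
     * then register only the BH pointer with exec.c. */
    dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
                         reschedule_dma, dbs);
    cpu_register_map_client(dbs->bh);

Since exec.c no longer creates the BH itself, it always schedules one that already exists, and the BH runs in the AioContext it was created in rather than the main loop's.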
dma-helpers.c (8 additions, 9 deletions)

@@ -92,14 +92,6 @@ static void reschedule_dma(void *opaque)
     dma_blk_cb(dbs, 0);
 }
 
-static void continue_after_map_failure(void *opaque)
-{
-    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-
-    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
-    qemu_bh_schedule(dbs->bh);
-}
-
 static void dma_blk_unmap(DMAAIOCB *dbs)
 {
     int i;
@@ -161,7 +153,9 @@ static void dma_blk_cb(void *opaque, int ret)

     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
-        cpu_register_map_client(dbs, continue_after_map_failure);
+        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
+                             reschedule_dma, dbs);
+        cpu_register_map_client(dbs->bh);
         return;
     }

@@ -183,6 +177,11 @@ static void dma_aio_cancel(BlockAIOCB *acb)
     if (dbs->acb) {
         blk_aio_cancel_async(dbs->acb);
     }
+    if (dbs->bh) {
+        cpu_unregister_map_client(dbs->bh);
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
 }


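The ordering inside the new dma_aio_cancel() is what closes the race. Here are the same added lines again, with comments added to spell out why each step is safe (the comments are this writeup's, not part of the patch):

    if (dbs->bh) {
        /* Takes map_client_list_lock, so it serializes against
         * cpu_notify_map_clients_locked(); after it returns, exec.c
         * holds no reference to the BH. */
        cpu_unregister_map_client(dbs->bh);
        /* Safe even if the notifier won the race and already scheduled
         * the BH: deleting a scheduled BH cancels it, so the callback
         * never runs on freed state. */
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
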
exec.c (21 additions, 13 deletions)

@@ -2479,40 +2479,42 @@ typedef struct {
 static BounceBuffer bounce;
 
 typedef struct MapClient {
-    void *opaque;
-    void (*callback)(void *opaque);
+    QEMUBH *bh;
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
 QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
-static void cpu_unregister_map_client(void *_client);
+static void cpu_unregister_map_client_do(MapClient *client)
+{
+    QLIST_REMOVE(client, link);
+    g_free(client);
+}
+
 static void cpu_notify_map_clients_locked(void)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
+        qemu_bh_schedule(client->bh);
+        cpu_unregister_map_client_do(client);
     }
 }
 
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+void cpu_register_map_client(QEMUBH *bh)
 {
     MapClient *client = g_malloc(sizeof(*client));
 
     qemu_mutex_lock(&map_client_list_lock);
-    client->opaque = opaque;
-    client->callback = callback;
+    client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
     if (!atomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_locked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
-    return client;
 }
 
 void cpu_exec_init_all(void)
@@ -2523,12 +2525,18 @@ void cpu_exec_init_all(void)
     qemu_mutex_init(&map_client_list_lock);
 }
 
-static void cpu_unregister_map_client(void *_client)
+void cpu_unregister_map_client(QEMUBH *bh)
 {
-    MapClient *client = (MapClient *)_client;
+    MapClient *client;
 
-    QLIST_REMOVE(client, link);
-    g_free(client);
+    qemu_mutex_lock(&map_client_list_lock);
+    QLIST_FOREACH(client, &map_client_list, link) {
+        if (client->bh == bh) {
+            cpu_unregister_map_client_do(client);
+            break;
+        }
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
 }
 
 static void cpu_notify_map_clients(void)
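On the exec.c side, notification and unregistration now serialize on map_client_list_lock, and the notifier merely schedules the client's pre-made BH instead of invoking a callback. The new loop once more, with comments added for this writeup:

    static void cpu_notify_map_clients_locked(void)
    {
        MapClient *client;

        /* Caller holds map_client_list_lock. */
        while (!QLIST_EMPTY(&map_client_list)) {
            client = QLIST_FIRST(&map_client_list);
            /* Schedule, don't call: the client's work runs later, in
             * whatever AioContext the client created the BH in. */
            qemu_bh_schedule(client->bh);
            /* Frees only the list node; the BH itself remains owned by
             * the client, which deletes it (see dma-helpers.c above). */
            cpu_unregister_map_client_do(client);
        }
    }
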
include/exec/cpu-common.h (2 additions, 1 deletion)

@@ -82,7 +82,8 @@ void *cpu_physical_memory_map(hwaddr addr,
                                int is_write);
 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                                int is_write, hwaddr access_len);
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+void cpu_register_map_client(QEMUBH *bh);
+void cpu_unregister_map_client(QEMUBH *bh);
 
 bool cpu_physical_memory_is_io(hwaddr phys_addr);

