Commit

Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging

Block patches

# gpg: Signature made Tue Apr 28 15:35:05 2015 BST using RSA key ID C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"

* remotes/kevin/tags/for-upstream: (76 commits)
  block: move I/O request processing to block/io.c
  block: extract bdrv_setup_io_funcs()
  block: add bdrv_set_dirty()/bdrv_reset_dirty() to block_int.h
  block: replace bdrv_states iteration with bdrv_next()
  vmdk: Widen before shifting 32 bit header field
  block/dmg: make it modular
  block/mirror: Always call block_job_sleep_ns()
  iotests: add incremental backup granularity tests
  iotests: add incremental backup failure recovery test
  iotests: add simple incremental backup case
  iotests: add QMP event waiting queue
  iotests: add invalid input incremental backup tests
  hbitmap: truncate tests
  block: Resize bitmaps on bdrv_truncate
  block: Ensure consistent bitmap function prototypes
  block: add BdrvDirtyBitmap documentation
  qmp: Add dirty bitmap status field in query-block
  qmp: add block-dirty-bitmap-clear
  qmp: Add support of "dirty-bitmap" sync mode for drive-backup
  block: Add bitmap successors
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
pm215 committed Apr 28, 2015
2 parents 84cbd63 + 61007b3 commit a9392bc
Showing 91 changed files with 6,033 additions and 3,142 deletions.
12 changes: 12 additions & 0 deletions MAINTAINERS
@@ -1182,7 +1182,19 @@ S: Supported
F: block/gluster.c
T: git git://github.com/codyprime/qemu-kvm-jtc.git block

Null Block Driver
M: Fam Zheng <famz@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: block/null.c

Bootdevice
M: Gonglei <arei.gonglei@huawei.com>
S: Maintained
F: bootdevice.c

Quorum
M: Alberto Garcia <berto@igalia.com>
S: Supported
F: block/quorum.c
L: qemu-block@nongnu.org
87 changes: 66 additions & 21 deletions aio-posix.c
@@ -24,7 +24,6 @@ struct AioHandler
    IOHandler *io_read;
    IOHandler *io_write;
    int deleted;
    int pollfds_idx;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};
@@ -83,7 +82,6 @@ void aio_set_fd_handler(AioContext *ctx,
        node->io_read = io_read;
        node->io_write = io_write;
        node->opaque = opaque;
        node->pollfds_idx = -1;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
@@ -186,13 +184,61 @@ bool aio_dispatch(AioContext *ctx)
    return progress;
}

/* These thread-local variables are used only in a small part of aio_poll
* around the call to the poll() system call. In particular they are not
* used while aio_poll is performing callbacks, which makes it much easier
* to think about reentrancy!
*
* Stack-allocated arrays would be perfect but they have size limitations;
* heap allocation is expensive enough that we want to reuse arrays across
* calls to aio_poll(). And because poll() has to be called without holding
* any lock, the arrays cannot be stored in AioContext. Thread-local data
* has none of the disadvantages of these three options.
*/
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    bool was_dispatching;
    int ret;
    int i, ret;
    bool progress;
    int64_t timeout;

    aio_context_acquire(ctx);
    was_dispatching = ctx->dispatching;
    progress = false;

@@ -210,45 +256,44 @@ bool aio_poll(AioContext *ctx, bool blocking)

    ctx->walking_handlers++;

    g_array_set_size(ctx->pollfds, 0);
    assert(npfd == 0);

    /* fill pollfds */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        node->pollfds_idx = -1;
        if (!node->deleted && node->pfd.events) {
            GPollFD pfd = {
                .fd = node->pfd.fd,
                .events = node->pfd.events,
            };
            node->pollfds_idx = ctx->pollfds->len;
            g_array_append_val(ctx->pollfds, pfd);
            add_pollfd(node);
        }
    }

    ctx->walking_handlers--;
    timeout = blocking ? aio_compute_timeout(ctx) : 0;

    /* wait until next event */
    ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data,
                       ctx->pollfds->len,
                       blocking ? aio_compute_timeout(ctx) : 0);
    if (timeout) {
        aio_context_release(ctx);
    }
    ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
    if (timeout) {
        aio_context_acquire(ctx);
    }

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            if (node->pollfds_idx != -1) {
                GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD,
                                              node->pollfds_idx);
                node->pfd.revents = pfd->revents;
            }
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;
    ctx->walking_handlers--;

    /* Run dispatch even if there were no readable fds to run timers */
    aio_set_dispatching(ctx, true);
    if (aio_dispatch(ctx)) {
        progress = true;
    }

    aio_set_dispatching(ctx, was_dispatching);
    aio_context_release(ctx);

    return progress;
}
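
The aio-posix.c change above replaces the pollfd GArray stored in the AioContext with arrays kept in thread-local storage, so they can be filled while walking the handler list and then handed to poll() after the context lock is dropped. Below is a minimal standalone sketch of that pattern, not QEMU code: it substitutes plain poll(2), realloc() and a pthread mutex for qemu_poll_ns(), g_renew() and the AioContext lock, and the names ctx_lock, watched and poll_once are invented for illustration.

/* Simplified sketch (not the QEMU code): a grow-on-demand, thread-local
 * pollfd array that is filled while a lock is held, then passed to
 * poll(2) with the lock dropped.  Because the arrays are thread-local,
 * no other thread can touch them while this one sleeps in poll().
 */
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static __thread struct pollfd *pollfds;   /* reused across calls, per thread */
static __thread unsigned npfd, nalloc;

static void add_pollfd(int fd, short events)
{
    if (npfd == nalloc) {
        nalloc = nalloc ? nalloc * 2 : 8;             /* grow geometrically */
        struct pollfd *p = realloc(pollfds, nalloc * sizeof(*p));
        if (!p) {
            abort();
        }
        pollfds = p;
    }
    pollfds[npfd++] = (struct pollfd){ .fd = fd, .events = events };
}

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static const int watched[] = { 0 };                   /* e.g. stdin */

static int poll_once(int timeout_ms)
{
    pthread_mutex_lock(&ctx_lock);
    npfd = 0;
    for (size_t i = 0; i < sizeof(watched) / sizeof(watched[0]); i++) {
        add_pollfd(watched[i], POLLIN);               /* fill under the lock */
    }
    pthread_mutex_unlock(&ctx_lock);                  /* never sleep holding it */

    int ret = poll(pollfds, npfd, timeout_ms);

    pthread_mutex_lock(&ctx_lock);
    /* dispatch here: pollfds[i].revents is valid and private to this thread */
    pthread_mutex_unlock(&ctx_lock);
    return ret;
}

int main(void)
{
    printf("poll returned %d\n", poll_once(1000));
    free(pollfds);
    return 0;
}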
8 changes: 8 additions & 0 deletions aio-win32.c
@@ -283,6 +283,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
    int count;
    int timeout;

    aio_context_acquire(ctx);
    have_select_revents = aio_prepare(ctx);
    if (have_select_revents) {
        blocking = false;
@@ -323,7 +324,13 @@

        timeout = blocking
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        if (timeout) {
            aio_context_release(ctx);
        }
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (timeout) {
            aio_context_acquire(ctx);
        }
        aio_set_dispatching(ctx, true);

        if (first && aio_bh_poll(ctx)) {
@@ -349,5 +356,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
    progress |= timerlistgroup_run_timers(&ctx->tlg);

    aio_set_dispatching(ctx, was_dispatching);
    aio_context_release(ctx);
    return progress;
}
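
Both the aio-posix.c and aio-win32.c hunks wrap the blocking wait in a release/re-acquire of the AioContext lock, and only when the computed timeout is non-zero, so a non-blocking poll skips the extra unlock/lock round trip. A hedged sketch of that idiom follows; the AioContext lock is replaced by a plain pthread mutex and the wait by a stub, with wait_for_events, poll_ctx and ctx_lock being made-up names.

/* Sketch of the locking idiom above, outside QEMU: drop the lock only
 * when the wait can actually block (timeout != 0), and re-take it before
 * running callbacks.  A pthread mutex stands in for the AioContext lock and
 * poll(NULL, 0, ...) stands in for qemu_poll_ns()/WaitForMultipleObjects().
 */
#include <poll.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

static int wait_for_events(int timeout_ms)
{
    return poll(NULL, 0, timeout_ms);      /* just sleeps; 0 = nothing ready */
}

static int poll_ctx(int timeout_ms)        /* caller holds ctx_lock */
{
    int ret;

    if (timeout_ms) {
        pthread_mutex_unlock(&ctx_lock);   /* let other threads in while we sleep */
    }
    ret = wait_for_events(timeout_ms);
    if (timeout_ms) {
        pthread_mutex_lock(&ctx_lock);     /* re-take before dispatching callbacks */
    }
    return ret;
}

int main(void)
{
    pthread_mutex_lock(&ctx_lock);
    printf("ready: %d\n", poll_ctx(10));
    pthread_mutex_unlock(&ctx_lock);
    return 0;
}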
10 changes: 1 addition & 9 deletions async.c
@@ -230,7 +230,6 @@ aio_ctx_finalize(GSource *source)
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    g_array_free(ctx->pollfds, TRUE);
    timerlistgroup_deinit(&ctx->tlg);
}

@@ -281,12 +280,6 @@ static void aio_timerlist_notify(void *opaque)
    aio_notify(opaque);
}

static void aio_rfifolock_cb(void *opaque)
{
    /* Kick owner thread in case they are blocked in aio_poll() */
    aio_notify(opaque);
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
@@ -302,10 +295,9 @@ AioContext *aio_context_new(Error **errp)
    aio_set_event_notifier(ctx, &ctx->notifier,
                           (EventNotifierHandler *)
                           event_notifier_test_and_clear);
    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    rfifolock_init(&ctx->lock, NULL, NULL);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    return ctx;
