Skip to content

Commit

Permalink
block-backend: Queue requests while drained
Browse files Browse the repository at this point in the history
This fixes devices like IDE that can still start new requests from I/O
handlers in the CPU thread while the block backend is drained.

The basic assumption is that in a drain section, no new requests should
be allowed through a BlockBackend (blk_drained_begin/end don't exist,
we get drain sections only on the node level). However, there are two
special cases where requests should not be queued:

1. Block jobs: We already make sure that block jobs are paused in a
   drain section, so they won't start new requests. However, if the
   drain_begin is called on the job's BlockBackend first, it can happen
   that we deadlock because the job stays busy until it reaches a pause
   point - which it can't if its requests aren't processed any more.

   The proper solution here would be to make all requests through the
   job's filter node instead of using a BlockBackend. For now, just
   disabling request queuing on the job BlockBackend is simpler.

2. In test cases where making requests through bdrv_* would be
   cumbersome because we'd need a BdrvChild. Since we already have the
   functionality to disable request queuing from 1., use it in tests,
   too, for convenience.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
  • Loading branch information
kevmw committed Aug 16, 2019
1 parent d2da5e2 commit cf31293
Show file tree
Hide file tree
Showing 7 changed files with 59 additions and 3 deletions.
1 change: 1 addition & 0 deletions block/backup.c
Expand Up @@ -644,6 +644,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
if (ret < 0) {
goto error;
}
blk_set_disable_request_queuing(job->target, true);

job->on_source_error = on_source_error;
job->on_target_error = on_target_error;
Expand Down
53 changes: 50 additions & 3 deletions block/block-backend.c
Expand Up @@ -79,6 +79,9 @@ struct BlockBackend {
QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;

int quiesce_counter;
CoQueue queued_requests;
bool disable_request_queuing;

VMChangeStateEntry *vmsh;
bool force_allow_inactivate;

Expand Down Expand Up @@ -339,6 +342,7 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)

block_acct_init(&blk->stats);

qemu_co_queue_init(&blk->queued_requests);
notifier_list_init(&blk->remove_bs_notifiers);
notifier_list_init(&blk->insert_bs_notifiers);
QLIST_INIT(&blk->aio_notifiers);
Expand Down Expand Up @@ -1096,6 +1100,11 @@ void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
blk->allow_aio_context_change = allow;
}

/*
 * Enable or disable request queuing for @blk while it is drained.
 *
 * When @disable is true, requests submitted during a drain section are
 * passed through instead of waiting on blk->queued_requests (see
 * blk_wait_while_drained()).  This is used for block job BlockBackends,
 * which are paused separately and would otherwise deadlock in drain, and
 * by tests for convenience.
 */
void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
{
    blk->disable_request_queuing = disable;
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
size_t size)
{
Expand Down Expand Up @@ -1127,13 +1136,24 @@ static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
return 0;
}

/*
 * Block the calling coroutine on blk->queued_requests until the drain
 * section ends, so that no new requests go through a drained
 * BlockBackend.  Does nothing if @blk is not quiesced, or if request
 * queuing has been explicitly disabled with
 * blk_set_disable_request_queuing().
 */
static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
{
    if (!blk->quiesce_counter || blk->disable_request_queuing) {
        return;
    }

    qemu_co_queue_wait(&blk->queued_requests, NULL);
}

int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
int ret;
BlockDriverState *bs = blk_bs(blk);
BlockDriverState *bs;

blk_wait_while_drained(blk);

/* Call blk_bs() only after waiting, the graph may have changed */
bs = blk_bs(blk);
trace_blk_co_preadv(blk, bs, offset, bytes, flags);

ret = blk_check_byte_request(blk, offset, bytes);
Expand All @@ -1159,8 +1179,12 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
BdrvRequestFlags flags)
{
int ret;
BlockDriverState *bs = blk_bs(blk);
BlockDriverState *bs;

blk_wait_while_drained(blk);

/* Call blk_bs() only after waiting, the graph may have changed */
bs = blk_bs(blk);
trace_blk_co_pwritev(blk, bs, offset, bytes, flags);

ret = blk_check_byte_request(blk, offset, bytes);
Expand Down Expand Up @@ -1349,6 +1373,12 @@ static void blk_aio_read_entry(void *opaque)
BlkRwCo *rwco = &acb->rwco;
QEMUIOVector *qiov = rwco->iobuf;

if (rwco->blk->quiesce_counter) {
blk_dec_in_flight(rwco->blk);
blk_wait_while_drained(rwco->blk);
blk_inc_in_flight(rwco->blk);
}

assert(qiov->size == acb->bytes);
rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
qiov, rwco->flags);
Expand All @@ -1361,6 +1391,12 @@ static void blk_aio_write_entry(void *opaque)
BlkRwCo *rwco = &acb->rwco;
QEMUIOVector *qiov = rwco->iobuf;

if (rwco->blk->quiesce_counter) {
blk_dec_in_flight(rwco->blk);
blk_wait_while_drained(rwco->blk);
blk_inc_in_flight(rwco->blk);
}

assert(!qiov || qiov->size == acb->bytes);
rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
qiov, rwco->flags);
Expand Down Expand Up @@ -1482,6 +1518,8 @@ void blk_aio_cancel_async(BlockAIOCB *acb)

int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
blk_wait_while_drained(blk);

if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
Expand Down Expand Up @@ -1522,7 +1560,11 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,

int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
int ret = blk_check_byte_request(blk, offset, bytes);
int ret;

blk_wait_while_drained(blk);

ret = blk_check_byte_request(blk, offset, bytes);
if (ret < 0) {
return ret;
}
Expand All @@ -1532,6 +1574,8 @@ int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)

int blk_co_flush(BlockBackend *blk)
{
blk_wait_while_drained(blk);

if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
Expand Down Expand Up @@ -2232,6 +2276,9 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
if (blk->dev_ops && blk->dev_ops->drained_end) {
blk->dev_ops->drained_end(blk->dev_opaque);
}
while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
/* Resume all queued requests */
}
}
}

Expand Down
2 changes: 2 additions & 0 deletions block/commit.c
Expand Up @@ -350,6 +350,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
if (ret < 0) {
goto fail;
}
blk_set_disable_request_queuing(s->base, true);
s->base_bs = base;

/* Required permissions are already taken with block_job_add_bdrv() */
Expand All @@ -358,6 +359,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
if (ret < 0) {
goto fail;
}
blk_set_disable_request_queuing(s->top, true);

s->backing_file_str = g_strdup(backing_file_str);
s->on_error = on_error;
Expand Down
1 change: 1 addition & 0 deletions block/mirror.c
Expand Up @@ -1636,6 +1636,7 @@ static BlockJob *mirror_start_job(
blk_set_force_allow_inactivate(s->target);
}
blk_set_allow_aio_context_change(s->target, true);
blk_set_disable_request_queuing(s->target, true);

s->replaces = g_strdup(replaces);
s->on_source_error = on_source_error;
Expand Down
3 changes: 3 additions & 0 deletions blockjob.c
Expand Up @@ -445,6 +445,9 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,

bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

/* Disable request queuing in the BlockBackend to avoid deadlocks on drain:
* The job reports that it's busy until it reaches a pause point. */
blk_set_disable_request_queuing(blk, true);
blk_set_allow_aio_context_change(blk, true);

/* Only set speed when necessary to avoid NotSupported error */
Expand Down
1 change: 1 addition & 0 deletions include/sysemu/block-backend.h
Expand Up @@ -104,6 +104,7 @@ void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);

void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow);
void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow);
void blk_set_disable_request_queuing(BlockBackend *blk, bool disable);
void blk_iostatus_enable(BlockBackend *blk);
bool blk_iostatus_is_enabled(const BlockBackend *blk);
BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
Expand Down
1 change: 1 addition & 0 deletions tests/test-bdrv-drain.c
Expand Up @@ -686,6 +686,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
&error_abort);
s = bs->opaque;
blk_insert_bs(blk, bs, &error_abort);
blk_set_disable_request_queuing(blk, true);

blk_set_aio_context(blk, ctx_a, &error_abort);
aio_context_acquire(ctx_a);
Expand Down

0 comments on commit cf31293

Please sign in to comment.