blockjob: Track job ratelimits via bytes, not sectors
The user interface specifies job rate limits in bytes/second.
It's pointless to have our internal representation track things
in sectors/second, particularly since we want to move away from
sector-based interfaces.

Fix up a doc typo found while verifying that the ratelimit
code handles the scaling difference.

Repetition of expressions like 'n * BDRV_SECTOR_SIZE' will be
cleaned up later when functions are converted to iterate over
images by bytes rather than by sectors.

Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
ebblake authored and kevmw committed Jul 10, 2017
1 parent c616f16 commit f3e4ce4
Showing 5 changed files with 18 additions and 13 deletions.
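Before the per-file hunks, a minimal sketch of the point the commit makes: the speed handed to ratelimit_set_speed() and the amount handed to ratelimit_calculate_delay() must use the same unit. The program below is an illustration only, not QEMU code; MiniLimit, set_speed(), calc_delay(), SECTOR_SIZE, and SLICE_NS are simplified stand-ins for RateLimit, ratelimit_set_speed(), ratelimit_calculate_delay(), BDRV_SECTOR_SIZE, and SLICE_TIME. It shows that keeping the speed in bytes/second and scaling the per-iteration sector count by the sector size (this commit's scheme) throttles the same way, up to the rounding of speed / BDRV_SECTOR_SIZE, as the old scheme of pre-dividing the speed and passing sector counts.

/* Simplified stand-in for QEMU's rate limiter -- illustration only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512ULL          /* stand-in for BDRV_SECTOR_SIZE */
#define SLICE_NS    100000000ULL    /* 100 ms, mirroring SLICE_TIME */

typedef struct {
    uint64_t quota;                 /* units permitted per time slice */
    uint64_t dispatched;            /* units recorded in the current slice */
} MiniLimit;

static void set_speed(MiniLimit *l, uint64_t units_per_sec)
{
    l->quota = units_per_sec * SLICE_NS / 1000000000ULL;
    l->dispatched = 0;
}

/* Record @n units; 0 means "no delay", nonzero means "sleep one slice". */
static uint64_t calc_delay(MiniLimit *l, uint64_t n)
{
    l->dispatched += n;
    return l->dispatched <= l->quota ? 0 : SLICE_NS;
}

int main(void)
{
    uint64_t speed = 10 * 1024 * 1024;      /* user-visible limit: 10 MiB/s */
    uint64_t sectors = 4096;                /* one iteration moved 2 MiB */
    MiniLimit old_style = {0, 0}, new_style = {0, 0};

    set_speed(&old_style, speed / SECTOR_SIZE);     /* old: sectors/second */
    set_speed(&new_style, speed);                   /* new: bytes/second */

    uint64_t d_old = calc_delay(&old_style, sectors);               /* sectors */
    uint64_t d_new = calc_delay(&new_style, sectors * SECTOR_SIZE); /* bytes */

    assert(d_old == d_new);
    printf("both schemes delay %llu ns\n", (unsigned long long)d_new);
    return 0;
}

With the 10 MiB/s limit and a 2 MiB burst, both variants exceed their 100 ms slice quota and return the same delay.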
5 changes: 3 additions & 2 deletions block/backup.c
@@ -208,7 +208,7 @@ static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
         error_setg(errp, QERR_INVALID_PARAMETER, "speed");
         return;
     }
-    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
+    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
 }
 
 static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
@@ -359,7 +359,8 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
      */
     if (job->common.speed) {
         uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
-                                                      job->sectors_read);
+                                                      job->sectors_read *
+                                                      BDRV_SECTOR_SIZE);
         job->sectors_read = 0;
         block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
     } else {
5 changes: 3 additions & 2 deletions block/commit.c
@@ -209,7 +209,8 @@ static void coroutine_fn commit_run(void *opaque)
         s->common.offset += n * BDRV_SECTOR_SIZE;
 
         if (copy && s->common.speed) {
-            delay_ns = ratelimit_calculate_delay(&s->limit, n);
+            delay_ns = ratelimit_calculate_delay(&s->limit,
+                                                 n * BDRV_SECTOR_SIZE);
         }
     }
 
@@ -231,7 +232,7 @@ static void commit_set_speed(BlockJob *job, int64_t speed, Error **errp)
         error_setg(errp, QERR_INVALID_PARAMETER, "speed");
         return;
     }
-    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
+    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
 }
 
 static const BlockJobDriver commit_job_driver = {
13 changes: 7 additions & 6 deletions block/mirror.c
@@ -396,7 +396,8 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
     bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
     while (nb_chunks > 0 && sector_num < end) {
         int64_t ret;
-        int io_sectors, io_sectors_acct;
+        int io_sectors;
+        int64_t io_bytes_acct;
         BlockDriverState *file;
         enum MirrorMethod {
             MIRROR_METHOD_COPY,
@@ -444,16 +445,16 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
         switch (mirror_method) {
         case MIRROR_METHOD_COPY:
             io_sectors = mirror_do_read(s, sector_num, io_sectors);
-            io_sectors_acct = io_sectors;
+            io_bytes_acct = io_sectors * BDRV_SECTOR_SIZE;
             break;
         case MIRROR_METHOD_ZERO:
         case MIRROR_METHOD_DISCARD:
             mirror_do_zero_or_discard(s, sector_num, io_sectors,
                                       mirror_method == MIRROR_METHOD_DISCARD);
             if (write_zeroes_ok) {
-                io_sectors_acct = 0;
+                io_bytes_acct = 0;
             } else {
-                io_sectors_acct = io_sectors;
+                io_bytes_acct = io_sectors * BDRV_SECTOR_SIZE;
             }
             break;
         default:
@@ -463,7 +464,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
         sector_num += io_sectors;
         nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
         if (s->common.speed) {
-            delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors_acct);
+            delay_ns = ratelimit_calculate_delay(&s->limit, io_bytes_acct);
         }
     }
     return delay_ns;
@@ -929,7 +930,7 @@ static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
         error_setg(errp, QERR_INVALID_PARAMETER, "speed");
         return;
     }
-    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
+    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
 }
 
 static void mirror_complete(BlockJob *job, Error **errp)
5 changes: 3 additions & 2 deletions block/stream.c
@@ -191,7 +191,8 @@ static void coroutine_fn stream_run(void *opaque)
         /* Publish progress */
         s->common.offset += n * BDRV_SECTOR_SIZE;
         if (copy && s->common.speed) {
-            delay_ns = ratelimit_calculate_delay(&s->limit, n);
+            delay_ns = ratelimit_calculate_delay(&s->limit,
+                                                 n * BDRV_SECTOR_SIZE);
         }
     }
 
@@ -220,7 +221,7 @@ static void stream_set_speed(BlockJob *job, int64_t speed, Error **errp)
         error_setg(errp, QERR_INVALID_PARAMETER, "speed");
         return;
     }
-    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
+    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
 }
 
 static const BlockJobDriver stream_job_driver = {
3 changes: 2 additions & 1 deletion include/qemu/ratelimit.h
@@ -24,7 +24,8 @@ typedef struct {
 
 /** Calculate and return delay for next request in ns
  *
- * Record that we sent @p n data units. If we may send more data units
+ * Record that we sent @n data units (where @n matches the scale chosen
+ * during ratelimit_set_speed). If we may send more data units
  * in the current time slice, return 0 (i.e. no delay). Otherwise
  * return the amount of time (in ns) until the start of the next time
  * slice that will permit sending the next chunk of data.
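To make the rewritten comment concrete, here is a small self-contained sketch of the documented behaviour. SliceLimit and delay_for() are invented stand-ins, not the contents of include/qemu/ratelimit.h, and the slice handling is deliberately simplified: each call records @n bytes, returns 0 while the current slice's quota holds, and otherwise returns the time remaining until the next slice begins.

/* Illustration of the comment's contract -- not the real RateLimit. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t slice_quota;       /* bytes permitted per slice */
    uint64_t slice_ns;          /* slice length in nanoseconds */
    uint64_t dispatched;        /* bytes recorded in the current slice */
    uint64_t next_slice_time;   /* timestamp (ns) when the next slice begins */
} SliceLimit;

static uint64_t delay_for(SliceLimit *l, uint64_t now_ns, uint64_t n_bytes)
{
    if (now_ns >= l->next_slice_time) {     /* a new slice starts: reset quota */
        l->next_slice_time = now_ns + l->slice_ns;
        l->dispatched = 0;
    }
    l->dispatched += n_bytes;
    if (l->dispatched <= l->slice_quota) {
        return 0;                           /* still within this slice */
    }
    return l->next_slice_time - now_ns;     /* wait until the next slice */
}

int main(void)
{
    /* 1 MiB per 100 ms slice, i.e. roughly 10 MiB/s. */
    SliceLimit l = { 1 << 20, 100 * 1000 * 1000, 0, 0 };
    uint64_t now = 0;
    int i;

    for (i = 0; i < 4; i++) {
        uint64_t delay = delay_for(&l, now, 512 * 1024);    /* 512 KiB chunks */
        printf("chunk %d: delay %llu ns\n", i, (unsigned long long)delay);
        now += delay;               /* pretend the job slept for that long */
    }
    return 0;
}

Run as-is it reports no delay for the first two 512 KiB chunks and a full 100 ms delay for the third, once the 1 MiB slice quota is exceeded; the fourth lands in the next slice and again costs nothing.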
