block/backup: refactor: split out backup_calculate_cluster_size
Split out the cluster_size calculation. Move copy-bitmap creation above
block-job creation, as we are going to share it with the upcoming
backup-top filter, which also has to be created before the actual block
job.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-id: 20190429090842.57910-6-vsementsov@virtuozzo.com
[mreitz: Dropped a paragraph from the commit message that was left over
         from a previous version]
Signed-off-by: Max Reitz <mreitz@redhat.com>
(cherry picked from commit ae6b12f)
*prereq for 110571b
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
Vladimir Sementsov-Ogievskiy authored and mdroth committed Oct 1, 2019
1 parent 54d45c8 commit 872b7b8
Showing 1 changed file with 52 additions and 30 deletions.
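For orientation, here is a condensed sketch (not a literal excerpt) of the setup order that backup_job_create() follows after this patch, drawn from the diff below; error handling and unrelated setup are omitted:

    /* 1. Compute the cluster size first, in the new helper. */
    cluster_size = backup_calculate_cluster_size(target, errp);
    if (cluster_size < 0) {
        goto error;
    }

    /* 2. Allocate the copy bitmap before the block job exists (and, per the
     *    commit message, before the upcoming backup-top filter will). */
    copy_bitmap = hbitmap_alloc(len, ctz32(cluster_size));

    /* 3. Only then create the block job ... */
    job = block_job_create(job_id, &backup_job_driver, txn, bs, ...);

    /* 4. ... and hand both results over to it; the error path frees the
     *    bitmap only if it was never handed over. */
    job->cluster_size = cluster_size;
    job->copy_bitmap = copy_bitmap;
    copy_bitmap = NULL;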
82 changes: 52 additions & 30 deletions block/backup.c
@@ -507,6 +507,42 @@ static const BlockJobDriver backup_job_driver = {
     .drain = backup_drain,
 };

+static int64_t backup_calculate_cluster_size(BlockDriverState *target,
+                                             Error **errp)
+{
+    int ret;
+    BlockDriverInfo bdi;
+
+    /*
+     * If there is no backing file on the target, we cannot rely on COW if our
+     * backup cluster size is smaller than the target cluster size. Even for
+     * targets with a backing file, try to avoid COW if possible.
+     */
+    ret = bdrv_get_info(target, &bdi);
+    if (ret == -ENOTSUP && !target->backing) {
+        /* Cluster size is not defined */
+        warn_report("The target block device doesn't provide "
+                    "information about the block size and it doesn't have a "
+                    "backing file. The default block size of %u bytes is "
+                    "used. If the actual block size of the target exceeds "
+                    "this default, the backup may be unusable",
+                    BACKUP_CLUSTER_SIZE_DEFAULT);
+        return BACKUP_CLUSTER_SIZE_DEFAULT;
+    } else if (ret < 0 && !target->backing) {
+        error_setg_errno(errp, -ret,
+            "Couldn't determine the cluster size of the target image, "
+            "which has no backing file");
+        error_append_hint(errp,
+            "Aborting, since this may create an unusable destination image\n");
+        return ret;
+    } else if (ret < 0 && target->backing) {
+        /* Not fatal; just trudge on ahead. */
+        return BACKUP_CLUSTER_SIZE_DEFAULT;
+    }
+
+    return MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
+}

 BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                             BlockDriverState *target, int64_t speed,
                             MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
@@ -518,9 +554,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                             JobTxn *txn, Error **errp)
 {
     int64_t len;
-    BlockDriverInfo bdi;
     BackupBlockJob *job = NULL;
     int ret;
+    int64_t cluster_size;
+    HBitmap *copy_bitmap = NULL;

     assert(bs);
     assert(target);
@@ -582,6 +619,13 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
         goto error;
     }

+    cluster_size = backup_calculate_cluster_size(target, errp);
+    if (cluster_size < 0) {
+        goto error;
+    }
+
+    copy_bitmap = hbitmap_alloc(len, ctz32(cluster_size));
+
     /* job->len is fixed, so we can't allow resize */
     job = block_job_create(job_id, &backup_job_driver, txn, bs,
                            BLK_PERM_CONSISTENT_READ,
@@ -610,35 +654,9 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,

     /* Detect image-fleecing (and similar) schemes */
     job->serialize_target_writes = bdrv_chain_contains(target, bs);
-
-    /* If there is no backing file on the target, we cannot rely on COW if our
-     * backup cluster size is smaller than the target cluster size. Even for
-     * targets with a backing file, try to avoid COW if possible. */
-    ret = bdrv_get_info(target, &bdi);
-    if (ret == -ENOTSUP && !target->backing) {
-        /* Cluster size is not defined */
-        warn_report("The target block device doesn't provide "
-                    "information about the block size and it doesn't have a "
-                    "backing file. The default block size of %u bytes is "
-                    "used. If the actual block size of the target exceeds "
-                    "this default, the backup may be unusable",
-                    BACKUP_CLUSTER_SIZE_DEFAULT);
-        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
-    } else if (ret < 0 && !target->backing) {
-        error_setg_errno(errp, -ret,
-            "Couldn't determine the cluster size of the target image, "
-            "which has no backing file");
-        error_append_hint(errp,
-            "Aborting, since this may create an unusable destination image\n");
-        goto error;
-    } else if (ret < 0 && target->backing) {
-        /* Not fatal; just trudge on ahead. */
-        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
-    } else {
-        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
-    }
-
-    job->copy_bitmap = hbitmap_alloc(len, ctz32(job->cluster_size));
+    job->cluster_size = cluster_size;
+    job->copy_bitmap = copy_bitmap;
+    copy_bitmap = NULL;
     job->use_copy_range = true;
     job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                         blk_get_max_transfer(job->target));
@@ -654,6 +672,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     return &job->common;

 error:
+    if (copy_bitmap) {
+        assert(!job || !job->copy_bitmap);
+        hbitmap_free(copy_bitmap);
+    }
     if (sync_bitmap) {
         bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
     }
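
As a quick illustration of how the new helper resolves the cluster size, here is a small standalone program (not QEMU code) that mirrors its decision table. The 64 KiB value for BACKUP_CLUSTER_SIZE_DEFAULT is an assumption about block/backup.c of this period, and calc() is a hypothetical stand-in that takes the bdrv_get_info() result directly instead of a BlockDriverState:

    /*
     * Standalone sketch only: mirrors the decision table of
     * backup_calculate_cluster_size(). Assumptions: BACKUP_CLUSTER_SIZE_DEFAULT
     * is 64 KiB, and calc() stands in for the real helper.
     */
    #include <errno.h>
    #include <inttypes.h>
    #include <stdio.h>

    #define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)   /* assumed 64 KiB */
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* ret: return value of bdrv_get_info(); cluster: bdi.cluster_size on success */
    static int64_t calc(int ret, int64_t cluster, int has_backing)
    {
        if (ret == -ENOTSUP && !has_backing) {
            return BACKUP_CLUSTER_SIZE_DEFAULT;   /* size unknown: warn, use default */
        } else if (ret < 0 && !has_backing) {
            return ret;                           /* hard error: caller aborts */
        } else if (ret < 0 && has_backing) {
            return BACKUP_CLUSTER_SIZE_DEFAULT;   /* not fatal: trudge on ahead */
        }
        return MAX(BACKUP_CLUSTER_SIZE_DEFAULT, cluster);
    }

    int main(void)
    {
        printf("%" PRId64 "\n", calc(0, 1 << 20, 0));   /* 1 MiB clusters -> 1048576 */
        printf("%" PRId64 "\n", calc(0, 512, 0));       /* tiny clusters  -> 65536 */
        printf("%" PRId64 "\n", calc(-ENOTSUP, 0, 0));  /* unknown size   -> 65536 */
        printf("%" PRId64 "\n", calc(-EIO, 0, 1));      /* error, backing -> 65536 */
        return 0;
    }

The decisions themselves are unchanged by this patch; the refactor only moves where they are made (into the helper, before block-job creation), not what they decide.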
