Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20180926a' into staging

Migration pull 2018-09-26

This supersedes Juan's pull from the 13th

# gpg: Signature made Wed 26 Sep 2018 18:07:30 BST
# gpg:                using RSA key 0516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>"
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A  9FA9 0516 331E BC5B FDE7

* remotes/dgilbert/tags/pull-migration-20180926a:
  migration/ram.c: Avoid taking address of fields in packed MultiFDInit_t struct
  migration: fix the compression code
  migration: fix QEMUFile leak
  tests/migration: Speed up the test on ppc64
  migration: cleanup in error paths in loadvm
  migration/postcopy: Clear have_listen_thread
  tests/migration: Add migration-test header file
  tests/migration: Support cross compilation in generating boot header file
  tests/migration: Convert x86 boot block compilation script into Makefile
  migration: use save_page_use_compression in flush_compressed_data
  migration: show the statistics of compression
  migration: do not flush_compressed_data at the end of iteration
  Add a hint message to loadvm and exits on failure
  migration: handle the error condition properly
  migration: fix calculating xbzrle_counters.cache_miss_rate
  migration/rdma: Fix uninitialised rdma_return_path

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
pm215 committed Sep 28, 2018
2 parents 567ea80 + 341ba0d commit 042938f
Showing 15 changed files with 255 additions and 98 deletions.
hmp.c: 13 additions, 0 deletions
@@ -271,6 +271,19 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
                        info->xbzrle_cache->overflow);
     }
 
+    if (info->has_compression) {
+        monitor_printf(mon, "compression pages: %" PRIu64 " pages\n",
+                       info->compression->pages);
+        monitor_printf(mon, "compression busy: %" PRIu64 "\n",
+                       info->compression->busy);
+        monitor_printf(mon, "compression busy rate: %0.2f\n",
+                       info->compression->busy_rate);
+        monitor_printf(mon, "compressed size: %" PRIu64 "\n",
+                       info->compression->compressed_size);
+        monitor_printf(mon, "compression rate: %0.2f\n",
+                       info->compression->compression_rate);
+    }
+
     if (info->has_cpu_throttle_percentage) {
         monitor_printf(mon, "cpu throttle percentage: %" PRIu64 "\n",
                        info->cpu_throttle_percentage);
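For reference, with the prints above in place, `info migrate` during a compressing migration would add output along these lines (values invented for illustration, not taken from a real run):

    compression pages: 284312 pages
    compression busy: 1528
    compression busy rate: 0.02
    compressed size: 268435456
    compression rate: 4.36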
migration/migration.c: 16 additions, 1 deletion
@@ -758,6 +758,18 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
         info->xbzrle_cache->overflow = xbzrle_counters.overflow;
     }
 
+    if (migrate_use_compression()) {
+        info->has_compression = true;
+        info->compression = g_malloc0(sizeof(*info->compression));
+        info->compression->pages = compression_counters.pages;
+        info->compression->busy = compression_counters.busy;
+        info->compression->busy_rate = compression_counters.busy_rate;
+        info->compression->compressed_size =
+                                    compression_counters.compressed_size;
+        info->compression->compression_rate =
+                                    compression_counters.compression_rate;
+    }
+
     if (cpu_throttle_active()) {
         info->has_cpu_throttle_percentage = true;
         info->cpu_throttle_percentage = cpu_throttle_get_percentage();
@@ -2268,7 +2280,10 @@ static void *source_return_path_thread(void *opaque)
          */
         if (postcopy_pause_return_path_thread(ms)) {
             /* Reload rp, reset the rest */
-            rp = ms->rp_state.from_dst_file;
+            if (rp != ms->rp_state.from_dst_file) {
+                qemu_fclose(rp);
+                rp = ms->rp_state.from_dst_file;
+            }
             ms->rp_state.error = false;
             goto retry;
         }
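The source_return_path_thread hunk above is the "migration: fix QEMUFile leak" change from the commit list: on postcopy recovery the return-path file may have been reopened, so the stale handle must be closed before the pointer is reloaded, otherwise the old QEMUFile leaks. A standalone sketch of the pattern (the helper name is hypothetical; qemu_fclose and the rp_state field are the real QEMU ones):

    /* Reload the return path after a postcopy pause: if the shared
     * pointer was replaced while this thread was paused, drop the stale
     * reference first so the old QEMUFile is not leaked. */
    static QEMUFile *reload_return_path(QEMUFile *rp, MigrationState *ms)
    {
        if (rp != ms->rp_state.from_dst_file) {
            qemu_fclose(rp);                  /* close the stale handle */
            rp = ms->rp_state.from_dst_file;  /* adopt the reopened one */
        }
        return rp;
    }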
migration/ram.c: 91 additions, 42 deletions
@@ -301,10 +301,19 @@ struct RAMState {
     uint64_t num_dirty_pages_period;
     /* xbzrle misses since the beginning of the period */
     uint64_t xbzrle_cache_miss_prev;
-    /* number of iterations at the beginning of period */
-    uint64_t iterations_prev;
-    /* Iterations since start */
-    uint64_t iterations;
+
+    /* compression statistics since the beginning of the period */
+    /* number of times no free thread was available to compress data */
+    uint64_t compress_thread_busy_prev;
+    /* number of bytes produced by compression */
+    uint64_t compressed_size_prev;
+    /* number of compressed pages */
+    uint64_t compress_pages_prev;
+
+    /* total handled target pages at the beginning of period */
+    uint64_t target_page_count_prev;
+    /* total handled target pages since start */
+    uint64_t target_page_count;
     /* number of dirty bits in the bitmap */
     uint64_t migration_dirty_pages;
     /* protects modification of the bitmap */
@@ -338,6 +347,8 @@ struct PageSearchStatus {
 };
 typedef struct PageSearchStatus PageSearchStatus;
 
+CompressionStats compression_counters;
+
 struct CompressParam {
     bool done;
     bool quit;
@@ -420,28 +431,14 @@ static void *do_data_compress(void *opaque)
     return NULL;
 }
 
-static inline void terminate_compression_threads(void)
-{
-    int idx, thread_count;
-
-    thread_count = migrate_compress_threads();
-
-    for (idx = 0; idx < thread_count; idx++) {
-        qemu_mutex_lock(&comp_param[idx].mutex);
-        comp_param[idx].quit = true;
-        qemu_cond_signal(&comp_param[idx].cond);
-        qemu_mutex_unlock(&comp_param[idx].mutex);
-    }
-}
-
 static void compress_threads_save_cleanup(void)
 {
     int i, thread_count;
 
-    if (!migrate_use_compression()) {
+    if (!migrate_use_compression() || !comp_param) {
         return;
     }
-    terminate_compression_threads();
+
     thread_count = migrate_compress_threads();
     for (i = 0; i < thread_count; i++) {
         /*
@@ -451,6 +448,12 @@ static void compress_threads_save_cleanup(void)
         if (!comp_param[i].file) {
             break;
         }
+
+        qemu_mutex_lock(&comp_param[i].mutex);
+        comp_param[i].quit = true;
+        qemu_cond_signal(&comp_param[i].cond);
+        qemu_mutex_unlock(&comp_param[i].mutex);
+
         qemu_thread_join(compress_threads + i);
         qemu_mutex_destroy(&comp_param[i].mutex);
         qemu_cond_destroy(&comp_param[i].cond);
@@ -648,8 +651,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
         return -1;
     }
 
-    be32_to_cpus(&msg.magic);
-    be32_to_cpus(&msg.version);
+    msg.magic = be32_to_cpu(msg.magic);
+    msg.version = be32_to_cpu(msg.version);
 
     if (msg.magic != MULTIFD_MAGIC) {
         error_setg(errp, "multifd: received packet magic %x "
@@ -734,15 +737,15 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
     RAMBlock *block;
     int i;
 
-    be32_to_cpus(&packet->magic);
+    packet->magic = be32_to_cpu(packet->magic);
     if (packet->magic != MULTIFD_MAGIC) {
         error_setg(errp, "multifd: received packet "
                    "magic %x and expected magic %x",
                    packet->magic, MULTIFD_MAGIC);
         return -1;
     }
 
-    be32_to_cpus(&packet->version);
+    packet->version = be32_to_cpu(packet->version);
     if (packet->version != MULTIFD_VERSION) {
         error_setg(errp, "multifd: received packet "
                    "version %d and expected version %d",
@@ -752,7 +755,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)

     p->flags = be32_to_cpu(packet->flags);
 
-    be32_to_cpus(&packet->size);
+    packet->size = be32_to_cpu(packet->size);
     if (packet->size > migrate_multifd_page_count()) {
         error_setg(errp, "multifd: received packet "
                    "with size %d and expected maximum size %d",
@@ -1592,21 +1595,42 @@ uint64_t ram_pagesize_summary(void)

 static void migration_update_rates(RAMState *rs, int64_t end_time)
 {
-    uint64_t iter_count = rs->iterations - rs->iterations_prev;
+    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
+    double compressed_size;
 
     /* calculate period counters */
     ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
                 / (end_time - rs->time_last_bitmap_sync);
 
-    if (!iter_count) {
+    if (!page_count) {
         return;
     }
 
     if (migrate_use_xbzrle()) {
         xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
-            rs->xbzrle_cache_miss_prev) / iter_count;
+            rs->xbzrle_cache_miss_prev) / page_count;
         rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
     }
+
+    if (migrate_use_compression()) {
+        compression_counters.busy_rate = (double)(compression_counters.busy -
+            rs->compress_thread_busy_prev) / page_count;
+        rs->compress_thread_busy_prev = compression_counters.busy;
+
+        compressed_size = compression_counters.compressed_size -
+                          rs->compressed_size_prev;
+        if (compressed_size) {
+            double uncompressed_size = (compression_counters.pages -
+                                    rs->compress_pages_prev) * TARGET_PAGE_SIZE;
+
+            /* Compression-Ratio = Uncompressed-size / Compressed-size */
+            compression_counters.compression_rate =
+                                        uncompressed_size / compressed_size;
+
+            rs->compress_pages_prev = compression_counters.pages;
+            rs->compressed_size_prev = compression_counters.compressed_size;
+        }
+    }
 }
 
 static void migration_bitmap_sync(RAMState *rs)
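As a worked example of the period arithmetic above (all numbers invented, and assuming a 4 KiB target page): if a sync period handled page_count = 1000 target pages, 50 compression attempts found every thread busy, and 300 pages were compressed from 300 * 4096 bytes down to 204800 bytes, then:

    busy_rate        = 50 / 1000           = 0.05
    uncompressed     = 300 * 4096          = 1228800 bytes
    compression_rate = 1228800 / 204800    = 6.0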
@@ -1662,7 +1686,7 @@ static void migration_bitmap_sync(RAMState *rs)

     migration_update_rates(rs, end_time);
 
-    rs->iterations_prev = rs->iterations;
+    rs->target_page_count_prev = rs->target_page_count;
 
     /* reset period counters */
     rs->time_last_bitmap_sync = end_time;
@@ -1888,17 +1912,25 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
 static void
 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
 {
+    ram_counters.transferred += bytes_xmit;
+
     if (param->zero_page) {
         ram_counters.duplicate++;
+        return;
     }
-    ram_counters.transferred += bytes_xmit;
+
+    /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
+    compression_counters.compressed_size += bytes_xmit - 8;
+    compression_counters.pages++;
 }
 
+static bool save_page_use_compression(RAMState *rs);
+
 static void flush_compressed_data(RAMState *rs)
 {
     int idx, len, thread_count;
 
-    if (!migrate_use_compression()) {
+    if (!save_page_use_compression(rs)) {
         return;
     }
     thread_count = migrate_compress_threads();
@@ -1996,17 +2028,22 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
             pss->page = 0;
             pss->block = QLIST_NEXT_RCU(pss->block, next);
             if (!pss->block) {
+                /*
+                 * If memory migration starts over, we will meet a dirtied page
+                 * which may still exist in the compression threads' ring, so we
+                 * should flush the compressed data to make sure the new page
+                 * is not overwritten by the old one on the destination.
+                 *
+                 * Also, if xbzrle is on, stop using data compression at this
+                 * point. In theory, xbzrle can do better than compression.
+                 */
+                flush_compressed_data(rs);
+
                 /* Hit the end of the list */
                 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
                 /* Flag that we've looped */
                 pss->complete_round = true;
                 rs->ram_bulk_stage = false;
-                if (migrate_use_xbzrle()) {
-                    /* If xbzrle is on, stop using the data compression at
-                     * this point. In theory, xbzrle can do better than
-                     * compression.
-                     */
-                    flush_compressed_data(rs);
-                }
             }
             /* Didn't find anything this time, but try again on the new block */
             *again = true;
@@ -2259,6 +2296,7 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
         return true;
     }
 
+    compression_counters.busy++;
     return false;
 }

@@ -2372,7 +2410,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
  *
  * Called within an RCU critical section.
  *
- * Returns the number of pages written where zero means no dirty pages
+ * Returns the number of pages written where zero means no dirty pages,
+ * or negative on error
  *
  * @rs: current RAM state
  * @last_stage: if we are at the completion stage
@@ -3196,7 +3235,13 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
             done = 1;
             break;
         }
-        rs->iterations++;
+
+        if (pages < 0) {
+            qemu_file_set_error(f, pages);
+            break;
+        }
+
+        rs->target_page_count += pages;
 
         /* we want to check in the 1st loop, just in case it was the 1st time
            and we had to sync the dirty bitmap.
@@ -3212,7 +3257,6 @@
         }
         i++;
     }
-    flush_compressed_data(rs);
     rcu_read_unlock();
 
     /*
@@ -3238,7 +3282,7 @@
 /**
  * ram_save_complete: function called to send the remaining amount of ram
  *
- * Returns zero to indicate success
+ * Returns zero to indicate success or negative on error
  *
  * Called with iothread lock
  *
@@ -3249,6 +3293,7 @@
 {
     RAMState **temp = opaque;
     RAMState *rs = *temp;
+    int ret = 0;
 
     rcu_read_lock();
 
@@ -3269,6 +3314,10 @@
             if (pages == 0) {
                 break;
             }
+            if (pages < 0) {
+                ret = pages;
+                break;
+            }
         }
 
         flush_compressed_data(rs);
@@ -3280,7 +3329,7 @@
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
     qemu_fflush(f);
 
-    return 0;
+    return ret;
 }
 
 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
migration/ram.h: 1 addition, 0 deletions
@@ -36,6 +36,7 @@

 extern MigrationStats ram_counters;
 extern XBZRLECacheStats xbzrle_counters;
+extern CompressionStats compression_counters;
 
 int xbzrle_cache_resize(int64_t new_size, Error **errp);
 uint64_t ram_bytes_remaining(void);
migration/rdma.c: 1 addition, 1 deletion
@@ -4012,7 +4012,7 @@ static void rdma_accept_incoming_migration(void *opaque)
 void rdma_start_incoming_migration(const char *host_port, Error **errp)
 {
     int ret;
-    RDMAContext *rdma, *rdma_return_path;
+    RDMAContext *rdma, *rdma_return_path = NULL;
     Error *local_err = NULL;
 
     trace_rdma_start_incoming_migration();
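The one-line rdma.c change is the "Fix uninitialised rdma_return_path" commit: rdma_start_incoming_migration has error paths that can run before rdma_return_path is assigned, so the cleanup code could previously free an uninitialised pointer. Pre-initialising it to NULL makes that cleanup a safe no-op, because g_free(NULL) does nothing. A condensed sketch of the hazard (control flow abbreviated, labels illustrative, not the exact QEMU code):

    RDMAContext *rdma, *rdma_return_path = NULL;  /* the fix: start as NULL */

    rdma = qemu_rdma_data_init(host_port, &local_err);
    if (rdma == NULL) {
        goto err;   /* taken before rdma_return_path is ever assigned */
    }
    /* ... */
err:
    g_free(rdma);
    g_free(rdma_return_path);  /* safe: freeing NULL is a no-op */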
