migration: Move last_sent_block into PageSearchStatus
Since we use PageSearchStatus to represent a channel, it makes perfect
sense to keep last_sent_block (i.e., the state behind
RAM_SAVE_FLAG_CONTINUE) per-channel rather than global, because each
channel can be sending pages from different ramblocks.

Hence move it from RAMState into PageSearchStatus.

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
xzpeter authored and Juan Quintela committed Nov 15, 2022
1 parent 4c11610 commit 366e80d
Showing 1 changed file with 41 additions and 30 deletions.
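
Illustrative sketch (not part of the commit, and not QEMU code): Channel, Block and FLAG_CONTINUE below are stand-ins for PageSearchStatus, RAMBlock and RAM_SAVE_FLAG_CONTINUE. It shows why the CONTINUE shortcut only stays correct when last_sent_block is tracked per channel, which is the point the commit message makes.

/* Sketch: per-channel last_sent_block drives the CONTINUE shortcut. */
#include <stdio.h>
#include <string.h>

#define FLAG_CONTINUE 0x20UL                 /* stand-in flag bit */

typedef struct Block { const char *idstr; } Block;

typedef struct Channel {
    const char *name;
    const Block *last_sent_block;            /* per-channel, as in PageSearchStatus */
} Channel;

/* Emit a page header; returns the number of header bytes "sent". */
static size_t send_page_header(Channel *ch, const Block *block,
                               unsigned long offset)
{
    size_t size = sizeof(offset);

    if (block == ch->last_sent_block) {
        /* Same block as the previous page on this channel: omit the name. */
        printf("%s: offset 0x%lx (CONTINUE)\n", ch->name, offset | FLAG_CONTINUE);
    } else {
        /* First page from this block on this channel: send the name once. */
        printf("%s: offset 0x%lx, block '%s'\n", ch->name, offset, block->idstr);
        size += 1 + strlen(block->idstr);
        ch->last_sent_block = block;
    }
    return size;
}

int main(void)
{
    Block ram = { "pc.ram" }, rom = { "pc.rom" };
    Channel precopy = { "precopy", NULL };
    Channel preempt = { "postcopy-preempt", NULL };

    /* The two channels interleave pages from different blocks; a single
     * global last_sent_block would wrongly claim CONTINUE after a switch. */
    send_page_header(&precopy, &ram, 0x1000);
    send_page_header(&preempt, &rom, 0x0000);
    send_page_header(&precopy, &ram, 0x2000);    /* CONTINUE is safe here */
    send_page_header(&preempt, &rom, 0x1000);    /* and here */
    return 0;
}
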
71 changes: 41 additions & 30 deletions migration/ram.c
@@ -117,6 +117,8 @@ XBZRLECacheStats xbzrle_counters;
 struct PageSearchStatus {
     /* The migration channel used for a specific host page */
     QEMUFile *pss_channel;
+    /* Last block from where we have sent data */
+    RAMBlock *last_sent_block;
     /* Current block being searched */
     RAMBlock *block;
     /* Current page to search from */
@@ -396,8 +398,6 @@ struct RAMState {
     int uffdio_fd;
     /* Last block that we have visited searching for dirty pages */
     RAMBlock *last_seen_block;
-    /* Last block from where we have sent data */
-    RAMBlock *last_sent_block;
     /* Last dirty target page we have sent */
     ram_addr_t last_page;
     /* last ram version we have seen */
@@ -712,16 +712,17 @@ static int compress_threads_save_setup(void)
  *
  * Returns the number of bytes written
  *
- * @f: QEMUFile where to send the data
+ * @pss: current PSS channel status
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  *          in the lower bits, it contains flags
  */
-static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
+static size_t save_page_header(PageSearchStatus *pss, RAMBlock *block,
                                ram_addr_t offset)
 {
     size_t size, len;
-    bool same_block = (block == rs->last_sent_block);
+    bool same_block = (block == pss->last_sent_block);
+    QEMUFile *f = pss->pss_channel;

     if (same_block) {
         offset |= RAM_SAVE_FLAG_CONTINUE;
@@ -734,7 +735,7 @@ static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
         qemu_put_byte(f, len);
         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
         size += 1 + len;
-        rs->last_sent_block = block;
+        pss->last_sent_block = block;
     }
     return size;
 }
@@ -818,17 +819,19 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
  * -1 means that xbzrle would be longer than normal
  *
  * @rs: current RAM state
+ * @pss: current PSS channel
  * @current_data: pointer to the address of the page contents
  * @current_addr: addr of the page
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  */
-static int save_xbzrle_page(RAMState *rs, QEMUFile *file,
+static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
                             uint8_t **current_data, ram_addr_t current_addr,
                             RAMBlock *block, ram_addr_t offset)
 {
     int encoded_len = 0, bytes_xbzrle;
     uint8_t *prev_cached_page;
+    QEMUFile *file = pss->pss_channel;

     if (!cache_is_cached(XBZRLE.cache, current_addr,
                          ram_counters.dirty_sync_count)) {
@@ -893,7 +896,7 @@ static int save_xbzrle_page(RAMState *rs, QEMUFile *file,
     }

     /* Send XBZRLE based compressed page */
-    bytes_xbzrle = save_page_header(rs, file, block,
+    bytes_xbzrle = save_page_header(pss, block,
                                     offset | RAM_SAVE_FLAG_XBZRLE);
     qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
     qemu_put_be16(file, encoded_len);
@@ -1324,19 +1327,19 @@ void ram_release_page(const char *rbname, uint64_t offset)
  * Returns the size of data written to the file, 0 means the page is not
  * a zero page
  *
- * @rs: current RAM state
- * @file: the file where the data is saved
+ * @pss: current PSS channel
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  */
-static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
+static int save_zero_page_to_file(PageSearchStatus *pss,
                                   RAMBlock *block, ram_addr_t offset)
 {
     uint8_t *p = block->host + offset;
+    QEMUFile *file = pss->pss_channel;
     int len = 0;

     if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
-        len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
+        len += save_page_header(pss, block, offset | RAM_SAVE_FLAG_ZERO);
         qemu_put_byte(file, 0);
         len += 1;
         ram_release_page(block->idstr, offset);
@@ -1349,14 +1352,14 @@ static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
  *
  * Returns the number of pages written.
  *
- * @rs: current RAM state
+ * @pss: current PSS channel
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  */
-static int save_zero_page(RAMState *rs, QEMUFile *file, RAMBlock *block,
+static int save_zero_page(PageSearchStatus *pss, RAMBlock *block,
                           ram_addr_t offset)
 {
-    int len = save_zero_page_to_file(rs, file, block, offset);
+    int len = save_zero_page_to_file(pss, block, offset);

     if (len) {
         stat64_add(&ram_atomic_counters.duplicate, 1);
@@ -1409,16 +1412,18 @@ static bool control_save_page(PageSearchStatus *pss, RAMBlock *block,
  *
  * Returns the number of pages written.
  *
- * @rs: current RAM state
+ * @pss: current PSS channel
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @buf: the page to be sent
  * @async: send to page asyncly
  */
-static int save_normal_page(RAMState *rs, QEMUFile *file, RAMBlock *block,
+static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
                             ram_addr_t offset, uint8_t *buf, bool async)
 {
-    ram_transferred_add(save_page_header(rs, file, block,
+    QEMUFile *file = pss->pss_channel;
+
+    ram_transferred_add(save_page_header(pss, block,
                                          offset | RAM_SAVE_FLAG_PAGE));
     if (async) {
         qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
@@ -1458,7 +1463,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss)

     XBZRLE_cache_lock();
     if (rs->xbzrle_enabled && !migration_in_postcopy()) {
-        pages = save_xbzrle_page(rs, pss->pss_channel, &p, current_addr,
+        pages = save_xbzrle_page(rs, pss, &p, current_addr,
                                  block, offset);
         if (!rs->last_stage) {
             /* Can't send this cached data async, since the cache page
@@ -1470,8 +1475,7 @@

     /* XBZRLE overflow or normal page */
     if (pages == -1) {
-        pages = save_normal_page(rs, pss->pss_channel, block, offset,
-                                 p, send_async);
+        pages = save_normal_page(pss, block, offset, p, send_async);
     }

     XBZRLE_cache_unlock();
Expand All @@ -1494,14 +1498,15 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
ram_addr_t offset, uint8_t *source_buf)
{
RAMState *rs = ram_state;
PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
uint8_t *p = block->host + offset;
int ret;

if (save_zero_page_to_file(rs, f, block, offset)) {
if (save_zero_page_to_file(pss, block, offset)) {
return true;
}

save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
save_page_header(pss, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);

/*
* copy it to a internal buffer to avoid it being modified by VM
@@ -2321,7 +2326,8 @@ static bool save_page_use_compression(RAMState *rs)
  * has been properly handled by compression, otherwise needs other
  * paths to handle it
  */
-static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
+static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
+                               RAMBlock *block, ram_addr_t offset)
 {
     if (!save_page_use_compression(rs)) {
         return false;
@@ -2337,7 +2343,7 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
      * We post the fist page as normal page as compression will take
      * much CPU resource.
      */
-    if (block != rs->last_sent_block) {
+    if (block != pss->last_sent_block) {
         flush_compressed_data(rs);
         return false;
     }
@@ -2368,11 +2374,11 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
         return res;
     }

-    if (save_compress_page(rs, block, offset)) {
+    if (save_compress_page(rs, pss, block, offset)) {
         return 1;
     }

-    res = save_zero_page(rs, pss->pss_channel, block, offset);
+    res = save_zero_page(pss, block, offset);
     if (res > 0) {
         /* Must let xbzrle know, otherwise a previous (now 0'd) cached
          * page would be stale
@@ -2503,7 +2509,7 @@ static void postcopy_preempt_choose_channel(RAMState *rs, PageSearchStatus *pss)
      * If channel switched, reset last_sent_block since the old sent block
      * may not be on the same channel.
      */
-    rs->last_sent_block = NULL;
+    pss->last_sent_block = NULL;

     trace_postcopy_preempt_switch_channel(channel);
 }
@@ -2842,8 +2848,13 @@ static void ram_save_cleanup(void *opaque)

 static void ram_state_reset(RAMState *rs)
 {
+    int i;
+
+    for (i = 0; i < RAM_CHANNEL_MAX; i++) {
+        rs->pss[i].last_sent_block = NULL;
+    }
+
     rs->last_seen_block = NULL;
-    rs->last_sent_block = NULL;
     rs->last_page = 0;
     rs->last_version = ram_list.version;
     rs->xbzrle_enabled = false;
@@ -3037,8 +3048,8 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms)
     migration_bitmap_sync(rs);

     /* Easiest way to make sure we don't resume in the middle of a host-page */
+    rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
     rs->last_seen_block = NULL;
-    rs->last_sent_block = NULL;
     rs->last_page = 0;

     postcopy_each_ram_send_discard(ms);