migration: Add last stage indicator to global dirty log
The global dirty log synchronization is used when KVM and the dirty
ring are enabled. There is a particularity for ARM64, where the backup
bitmap is used to track dirty pages in non-running-vcpu situations.
This means the dirty ring works as a combination of the ring buffer
and the backup bitmap. The dirty bits in the backup bitmap need to be
collected in the last stage of live migration.

In order to identify the last stage of live migration and pass it
down, an extra parameter is added to the relevant functions and
callbacks. This last-stage indicator isn't used until the dirty
ring is enabled in subsequent patches.

No functional change intended.

Signed-off-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Tested-by: Zhenyu Zhang <zhenyzha@redhat.com>
Message-Id: <20230509022122.20888-2-gshan@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
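
To make the new parameter concrete before the diff: below is a minimal,
self-contained sketch (not QEMU code) of how a global log-sync callback
could use a last_stage flag, draining the per-vCPU dirty rings on every
sync and folding in the backup bitmap only on the final sync, once all
vCPUs have been stopped. The type and helper names here
(DirtyLogListenerSketch, collect_dirty_rings, collect_backup_bitmap) are
hypothetical placeholders, not the real KVM/QEMU APIs.

    /* Illustrative sketch only -- placeholder names, not QEMU code. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct DirtyLogListenerSketch DirtyLogListenerSketch;
    struct DirtyLogListenerSketch {
        /* Mirrors the shape of the new callback: listener plus last_stage. */
        void (*log_sync_global)(DirtyLogListenerSketch *listener, bool last_stage);
    };

    static void collect_dirty_rings(void)
    {
        /* Drain the per-vCPU ring buffers (placeholder). */
        printf("collect per-vcpu dirty rings\n");
    }

    static void collect_backup_bitmap(void)
    {
        /* Harvest dirty bits recorded outside of a running vCPU (placeholder). */
        printf("collect backup bitmap\n");
    }

    static void sketch_log_sync_global(DirtyLogListenerSketch *listener, bool last_stage)
    {
        (void)listener;
        collect_dirty_rings();
        if (last_stage) {
            /* Only done once, after every vCPU has been stopped. */
            collect_backup_bitmap();
        }
    }

    int main(void)
    {
        DirtyLogListenerSketch l = { .log_sync_global = sketch_log_sync_global };

        l.log_sync_global(&l, false);   /* iterative pre-copy sync */
        l.log_sync_global(&l, true);    /* final sync in the last stage */
        return 0;
    }

As the diff shows, the only caller that passes true is the final sync in
ram_save_complete(); every other synchronization point passes false.
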
Gavin Shan authored and bonzini committed May 16, 2023
1 parent 4bcde8d commit ac503e8
Showing 5 changed files with 25 additions and 20 deletions.
2 changes: 1 addition & 1 deletion accel/kvm/kvm-all.c
@@ -1563,7 +1563,7 @@ static void kvm_log_sync(MemoryListener *listener,
kvm_slots_unlock();
}

-static void kvm_log_sync_global(MemoryListener *l)
+static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
{
KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
KVMState *s = kvm_state;
9 changes: 7 additions & 2 deletions include/exec/memory.h
@@ -934,8 +934,11 @@ struct MemoryListener {
* its @log_sync must be NULL. Vice versa.
*
* @listener: The #MemoryListener.
+* @last_stage: The last stage to synchronize the log during migration.
+* The caller should guarantee that the synchronization with true for
+* @last_stage is triggered only once after all VCPUs have been stopped.
*/
-void (*log_sync_global)(MemoryListener *listener);
+void (*log_sync_global)(MemoryListener *listener, bool last_stage);

/**
* @log_clear:
@@ -2422,8 +2425,10 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr,
* memory_global_dirty_log_sync: synchronize the dirty log for all memory
*
* Synchronizes the dirty page log for all address spaces.
+*
+* @last_stage: whether this is the last stage of live migration
*/
-void memory_global_dirty_log_sync(void);
+void memory_global_dirty_log_sync(bool last_stage);

/**
* memory_global_dirty_log_sync: synchronize the dirty log for all memory
4 changes: 2 additions & 2 deletions migration/dirtyrate.c
@@ -100,7 +100,7 @@ void global_dirty_log_change(unsigned int flag, bool start)
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
{
qemu_mutex_lock_iothread();
-memory_global_dirty_log_sync();
+memory_global_dirty_log_sync(false);
if (one_shot) {
memory_global_dirty_log_stop(flag);
}
@@ -580,7 +580,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
* skip it unconditionally and start dirty tracking
* from 2'round of log sync
*/
-memory_global_dirty_log_sync();
+memory_global_dirty_log_sync(false);

/*
* reset page protect manually and unconditionally.
20 changes: 10 additions & 10 deletions migration/ram.c
@@ -1039,7 +1039,7 @@ static void migration_trigger_throttle(RAMState *rs)
}
}

-static void migration_bitmap_sync(RAMState *rs)
+static void migration_bitmap_sync(RAMState *rs, bool last_stage)
{
RAMBlock *block;
int64_t end_time;
@@ -1051,7 +1051,7 @@ static void migration_bitmap_sync(RAMState *rs)
}

trace_migration_bitmap_sync_start();
-memory_global_dirty_log_sync();
+memory_global_dirty_log_sync(last_stage);

qemu_mutex_lock(&rs->bitmap_mutex);
WITH_RCU_READ_LOCK_GUARD() {
@@ -1086,7 +1086,7 @@ static void migration_bitmap_sync(RAMState *rs)
}
}

-static void migration_bitmap_sync_precopy(RAMState *rs)
+static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
{
Error *local_err = NULL;

@@ -1099,7 +1099,7 @@ static void migration_bitmap_sync_precopy(RAMState *rs)
local_err = NULL;
}

-migration_bitmap_sync(rs);
+migration_bitmap_sync(rs, last_stage);

if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
error_report_err(local_err);
@@ -2699,7 +2699,7 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms)
RCU_READ_LOCK_GUARD();

/* This should be our last sync, the src is now paused */
-migration_bitmap_sync(rs);
+migration_bitmap_sync(rs, false);

/* Easiest way to make sure we don't resume in the middle of a host-page */
rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
@@ -2890,7 +2890,7 @@ static void ram_init_bitmaps(RAMState *rs)
/* We don't use dirty log with background snapshots */
if (!migrate_background_snapshot()) {
memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
-migration_bitmap_sync_precopy(rs);
+migration_bitmap_sync_precopy(rs, false);
}
}
qemu_mutex_unlock_ramlist();
@@ -3214,7 +3214,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)

WITH_RCU_READ_LOCK_GUARD() {
if (!migration_in_postcopy()) {
-migration_bitmap_sync_precopy(rs);
+migration_bitmap_sync_precopy(rs, true);
}

ram_control_before_iterate(f, RAM_CONTROL_FINISH);
@@ -3288,7 +3288,7 @@ static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
if (!migration_in_postcopy() && remaining_size < s->threshold_size) {
qemu_mutex_lock_iothread();
WITH_RCU_READ_LOCK_GUARD() {
-migration_bitmap_sync_precopy(rs);
+migration_bitmap_sync_precopy(rs, false);
}
qemu_mutex_unlock_iothread();
remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
@@ -3523,7 +3523,7 @@ void colo_incoming_start_dirty_log(void)
qemu_mutex_lock_iothread();
qemu_mutex_lock_ramlist();

-memory_global_dirty_log_sync();
+memory_global_dirty_log_sync(false);
WITH_RCU_READ_LOCK_GUARD() {
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
ramblock_sync_dirty_bitmap(ram_state, block);
@@ -3813,7 +3813,7 @@ void colo_flush_ram_cache(void)
void *src_host;
unsigned long offset = 0;

-memory_global_dirty_log_sync();
+memory_global_dirty_log_sync(false);
qemu_mutex_lock(&ram_state->bitmap_mutex);
WITH_RCU_READ_LOCK_GUARD() {
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
10 changes: 5 additions & 5 deletions softmmu/memory.c
@@ -2253,7 +2253,7 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
* If memory region `mr' is NULL, do global sync. Otherwise, sync
* dirty bitmap for the specified memory region.
*/
-static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
+static void memory_region_sync_dirty_bitmap(MemoryRegion *mr, bool last_stage)
{
MemoryListener *listener;
AddressSpace *as;
@@ -2283,7 +2283,7 @@ static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
* is to do a global sync, because we are not capable to
* sync in a finer granularity.
*/
-listener->log_sync_global(listener);
+listener->log_sync_global(listener, last_stage);
trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1);
}
}
@@ -2347,7 +2347,7 @@ DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
{
DirtyBitmapSnapshot *snapshot;
assert(mr->ram_block);
-memory_region_sync_dirty_bitmap(mr);
+memory_region_sync_dirty_bitmap(mr, false);
snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
memory_global_after_dirty_log_sync();
return snapshot;
@@ -2873,9 +2873,9 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr)
return mr && mr != container;
}

-void memory_global_dirty_log_sync(void)
+void memory_global_dirty_log_sync(bool last_stage)
{
-memory_region_sync_dirty_bitmap(NULL);
+memory_region_sync_dirty_bitmap(NULL, last_stage);
}

void memory_global_after_dirty_log_sync(void)
