Merge remote-tracking branch 'remotes/borntraeger/tags/s390x-20190930' into staging

- do not abuse memory_region_allocate_system_memory and split the memory
  according to KVM memslots in KVM code instead (Paolo, Igor)
- change splitting to split at 4TB (Christian)
- do not claim s390 (31bit) support in configure (Thomas)
- sclp error checking (Janosch, Claudio)
- new s390 pci maintainer (Matt, Collin)
- fix s390 pci (again) (Matt)

# gpg: Signature made Mon 30 Sep 2019 12:52:51 BST
# gpg:                using RSA key 117BBC80B5A61C7C
# gpg: Good signature from "Christian Borntraeger (IBM) <borntraeger@de.ibm.com>" [full]
# Primary key fingerprint: F922 9381 A334 08F9 DBAB  FBCA 117B BC80 B5A6 1C7C

* remotes/borntraeger/tags/s390x-20190930:
  s390/kvm: split kvm mem slots at 4TB
  s390: do not call memory_region_allocate_system_memory() multiple times
  kvm: split too big memory section on several memslots
  kvm: clear dirty bitmaps from all overlapping memslots
  kvm: extract kvm_log_clear_one_slot
  configure: Remove s390 (31-bit mode) from the list of supported CPUs
  s390x: sclp: Report insufficient SCCB length
  s390x: sclp: fix error handling for oversize control blocks
  s390x: sclp: boundary check
  s390x: sclp: refactor invalid command check
  s390: PCI: fix IOMMU region init
  MAINTAINERS: Update S390 PCI Maintainer

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
pm215 committed Sep 30, 2019
2 parents 786d36a + c5b9ce5 commit 95e9d74
Showing 9 changed files with 203 additions and 128 deletions.
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -1206,7 +1206,7 @@ T: git https://github.com/borntraeger/qemu.git s390-next
 L: qemu-s390x@nongnu.org
 
 S390 PCI
-M: Collin Walling <walling@linux.ibm.com>
+M: Matthew Rosato <mjrosato@linux.ibm.com>
 S: Supported
 F: hw/s390x/s390-pci*
 L: qemu-s390x@nongnu.org
239 changes: 147 additions & 92 deletions accel/kvm/kvm-all.c
@@ -140,6 +140,7 @@ bool kvm_direct_msi_allowed;
 bool kvm_ioeventfd_any_length_allowed;
 bool kvm_msi_use_devid;
 static bool kvm_immediate_exit;
+static hwaddr kvm_max_slot_size = ~0;
 
 static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_INFO(USER_MEMORY),
@@ -437,7 +438,7 @@ static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
 static int kvm_section_update_flags(KVMMemoryListener *kml,
                                     MemoryRegionSection *section)
 {
-    hwaddr start_addr, size;
+    hwaddr start_addr, size, slot_size;
     KVMSlot *mem;
     int ret = 0;
 
@@ -448,13 +449,18 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
 
     kvm_slots_lock(kml);
 
-    mem = kvm_lookup_matching_slot(kml, start_addr, size);
-    if (!mem) {
-        /* We don't have a slot if we want to trap every access. */
-        goto out;
-    }
+    while (size && !ret) {
+        slot_size = MIN(kvm_max_slot_size, size);
+        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
+        if (!mem) {
+            /* We don't have a slot if we want to trap every access. */
+            goto out;
+        }
 
-    ret = kvm_slot_update_flags(kml, mem, section->mr);
+        ret = kvm_slot_update_flags(kml, mem, section->mr);
+        start_addr += slot_size;
+        size -= slot_size;
+    }
 
 out:
     kvm_slots_unlock(kml);
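
The while-loop above is the core pattern of this series: walk a section in chunks of at most kvm_max_slot_size, which defaults to ~0 (effectively unlimited) until an architecture lowers it. A minimal, self-contained sketch of that chunking under an assumed 4 TiB cap — the names and values here are illustrative, not QEMU code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

typedef uint64_t hwaddr;

/* hypothetical cap; the s390 patch in this series installs 4 TiB */
static hwaddr kvm_max_slot_size = 4ULL << 40;

static void walk_in_slot_chunks(hwaddr start_addr, hwaddr size)
{
    while (size) {
        hwaddr slot_size = MIN(kvm_max_slot_size, size);
        /* QEMU looks up and updates the memslot for exactly this chunk */
        printf("chunk: start=0x%" PRIx64 " size=0x%" PRIx64 "\n",
               (uint64_t)start_addr, (uint64_t)slot_size);
        start_addr += slot_size;
        size -= slot_size;
    }
}

int main(void)
{
    walk_in_slot_chunks(0, 10ULL << 40); /* 10 TiB -> 4 + 4 + 2 TiB */
    return 0;
}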
@@ -527,11 +533,15 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
     struct kvm_dirty_log d = {};
     KVMSlot *mem;
     hwaddr start_addr, size;
+    hwaddr slot_size, slot_offset = 0;
     int ret = 0;
 
     size = kvm_align_section(section, &start_addr);
-    if (size) {
-        mem = kvm_lookup_matching_slot(kml, start_addr, size);
+    while (size) {
+        MemoryRegionSection subsection = *section;
+
+        slot_size = MIN(kvm_max_slot_size, size);
+        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
         if (!mem) {
             /* We don't have a slot if we want to trap every access. */
             goto out;
@@ -549,11 +559,11 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
          * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
          * a hope that sizeof(long) won't become >8 any time soon.
          */
-        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
-                     /*HOST_LONG_BITS*/ 64) / 8;
         if (!mem->dirty_bmap) {
+            hwaddr bitmap_size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
+                                        /*HOST_LONG_BITS*/ 64) / 8;
             /* Allocate on the first log_sync, once and for all */
-            mem->dirty_bmap = g_malloc0(size);
+            mem->dirty_bmap = g_malloc0(bitmap_size);
         }
 
         d.dirty_bitmap = mem->dirty_bmap;
@@ -564,7 +574,13 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
             goto out;
         }
 
-        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
+        subsection.offset_within_region += slot_offset;
+        subsection.size = int128_make64(slot_size);
+        kvm_get_dirty_pages_log_range(&subsection, d.dirty_bitmap);
+
+        slot_offset += slot_size;
+        start_addr += slot_size;
+        size -= slot_size;
     }
 out:
     return ret;
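
The sync loop above additionally builds a per-chunk view of the section: a copy whose offset_within_region advances by slot_offset and whose size is clamped to slot_size, so each pass only marks pages backed by the current memslot. A toy model of just that bookkeeping, using a simplified stand-in for MemoryRegionSection (QEMU's section sizes are Int128):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* simplified stand-in for QEMU's MemoryRegionSection */
typedef struct {
    uint64_t offset_within_region;
    uint64_t size;
} Section;

int main(void)
{
    const uint64_t max_slot = 4ULL << 40;      /* assumed 4 TiB cap */
    Section section = { .offset_within_region = 0, .size = 6ULL << 40 };
    uint64_t size = section.size, slot_offset = 0;

    while (size) {
        uint64_t slot_size = MIN(max_slot, size);
        Section subsection = section;           /* per-chunk copy */
        subsection.offset_within_region += slot_offset;
        subsection.size = slot_size;
        /* kvm_get_dirty_pages_log_range(&subsection, ...) would run here,
         * touching only the pages backed by the current memslot */
        printf("sync: region-offset=0x%" PRIx64 " len=0x%" PRIx64 "\n",
               subsection.offset_within_region, subsection.size);
        slot_offset += slot_size;
        size -= slot_size;
    }
    return 0;
}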
@@ -575,63 +591,22 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
 #define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
 #define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
 
-/**
- * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
- *
- * NOTE: this will be a no-op if we haven't enabled manual dirty log
- * protection in the host kernel because in that case this operation
- * will be done within log_sync().
- *
- * @kml: the kvm memory listener
- * @section: the memory range to clear dirty bitmap
- */
-static int kvm_physical_log_clear(KVMMemoryListener *kml,
-                                  MemoryRegionSection *section)
+static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
+                                  uint64_t size)
 {
     KVMState *s = kvm_state;
+    uint64_t end, bmap_start, start_delta, bmap_npages;
     struct kvm_clear_dirty_log d;
-    uint64_t start, end, bmap_start, start_delta, bmap_npages, size;
     unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
-    KVMSlot *mem = NULL;
-    int ret, i;
-
-    if (!s->manual_dirty_log_protect) {
-        /* No need to do explicit clear */
-        return 0;
-    }
-
-    start = section->offset_within_address_space;
-    size = int128_get64(section->size);
-
-    if (!size) {
-        /* Nothing more we can do... */
-        return 0;
-    }
-
-    kvm_slots_lock(kml);
-
-    /* Find any possible slot that covers the section */
-    for (i = 0; i < s->nr_slots; i++) {
-        mem = &kml->slots[i];
-        if (mem->start_addr <= start &&
-            start + size <= mem->start_addr + mem->memory_size) {
-            break;
-        }
-    }
-
-    /*
-     * We should always find one memslot until this point, otherwise
-     * there could be something wrong from the upper layer
-     */
-    assert(mem && i != s->nr_slots);
+    int ret;
 
     /*
      * We need to extend either the start or the size or both to
      * satisfy the KVM interface requirement.  Firstly, do the start
      * page alignment on 64 host pages
      */
-    bmap_start = (start - mem->start_addr) & KVM_CLEAR_LOG_MASK;
-    start_delta = start - mem->start_addr - bmap_start;
+    bmap_start = start & KVM_CLEAR_LOG_MASK;
+    start_delta = start - bmap_start;
     bmap_start /= psize;
 
     /*
@@ -694,7 +669,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
     /* It should never overflow.  If it happens, say something */
     assert(bmap_npages <= UINT32_MAX);
     d.num_pages = bmap_npages;
-    d.slot = mem->slot | (kml->as_id << 16);
+    d.slot = mem->slot | (as_id << 16);
 
     if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
         ret = -errno;
@@ -717,6 +692,66 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  size / psize);
     /* This handles the NULL case well */
     g_free(bmap_clear);
+    return ret;
+}
+
+
+/**
+ * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
+ *
+ * NOTE: this will be a no-op if we haven't enabled manual dirty log
+ * protection in the host kernel because in that case this operation
+ * will be done within log_sync().
+ *
+ * @kml: the kvm memory listener
+ * @section: the memory range to clear dirty bitmap
+ */
+static int kvm_physical_log_clear(KVMMemoryListener *kml,
+                                  MemoryRegionSection *section)
+{
+    KVMState *s = kvm_state;
+    uint64_t start, size, offset, count;
+    KVMSlot *mem;
+    int ret, i;
+
+    if (!s->manual_dirty_log_protect) {
+        /* No need to do explicit clear */
+        return 0;
+    }
+
+    start = section->offset_within_address_space;
+    size = int128_get64(section->size);
+
+    if (!size) {
+        /* Nothing more we can do... */
+        return 0;
+    }
+
+    kvm_slots_lock(kml);
+
+    for (i = 0; i < s->nr_slots; i++) {
+        mem = &kml->slots[i];
+        /* Discard slots that are empty or do not overlap the section */
+        if (!mem->memory_size ||
+            mem->start_addr > start + size - 1 ||
+            start > mem->start_addr + mem->memory_size - 1) {
+            continue;
+        }
+
+        if (start >= mem->start_addr) {
+            /* The slot starts before section or is aligned to it. */
+            offset = start - mem->start_addr;
+            count = MIN(mem->memory_size - offset, size);
+        } else {
+            /* The slot starts after section. */
+            offset = 0;
+            count = MIN(mem->memory_size, size - (mem->start_addr - start));
+        }
+        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
+        if (ret < 0) {
+            break;
+        }
+    }
+
+    kvm_slots_unlock(kml);
+
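
The offset/count arithmetic above is easiest to verify with concrete numbers. A standalone sketch under assumed values — two hypothetical 4 TiB slots backing one region, and a clear request spanning their boundary:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    /* two made-up 4 TiB slots, as if one 8 TiB region had been split */
    struct { uint64_t start_addr, memory_size; } slots[] = {
        { 0,          4ULL << 40 },
        { 4ULL << 40, 4ULL << 40 },
    };
    uint64_t start = 3ULL << 40, size = 2ULL << 40; /* clear [3 TiB, 5 TiB) */

    for (int i = 0; i < 2; i++) {
        uint64_t s = slots[i].start_addr, len = slots[i].memory_size;
        if (!len || s > start + size - 1 || start > s + len - 1) {
            continue;                     /* empty or non-overlapping */
        }
        uint64_t offset, count;
        if (start >= s) {                 /* section begins inside the slot */
            offset = start - s;
            count = MIN(len - offset, size);
        } else {                          /* slot begins inside the section */
            offset = 0;
            count = MIN(len, size - (s - start));
        }
        /* slot 0: offset=3 TiB count=1 TiB; slot 1: offset=0 count=1 TiB */
        printf("slot %d: offset=0x%" PRIx64 " count=0x%" PRIx64 "\n",
               i, offset, count);
    }
    return 0;
}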
@@ -953,14 +988,22 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
     return NULL;
 }
 
+void kvm_set_max_memslot_size(hwaddr max_slot_size)
+{
+    g_assert(
+        ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
+    );
+    kvm_max_slot_size = max_slot_size;
+}
+
 static void kvm_set_phys_mem(KVMMemoryListener *kml,
                              MemoryRegionSection *section, bool add)
 {
     KVMSlot *mem;
     int err;
     MemoryRegion *mr = section->mr;
     bool writeable = !mr->readonly && !mr->rom_device;
-    hwaddr start_addr, size;
+    hwaddr start_addr, size, slot_size;
     void *ram;
 
     if (!memory_region_is_ram(mr)) {
@@ -985,41 +1028,52 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
     kvm_slots_lock(kml);
 
     if (!add) {
-        mem = kvm_lookup_matching_slot(kml, start_addr, size);
-        if (!mem) {
-            goto out;
-        }
-        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
-            kvm_physical_sync_dirty_bitmap(kml, section);
-        }
+        do {
+            slot_size = MIN(kvm_max_slot_size, size);
+            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
+            if (!mem) {
+                goto out;
+            }
+            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+                kvm_physical_sync_dirty_bitmap(kml, section);
+            }
 
-        /* unregister the slot */
-        g_free(mem->dirty_bmap);
-        mem->dirty_bmap = NULL;
-        mem->memory_size = 0;
-        mem->flags = 0;
-        err = kvm_set_user_memory_region(kml, mem, false);
-        if (err) {
-            fprintf(stderr, "%s: error unregistering slot: %s\n",
-                    __func__, strerror(-err));
-            abort();
-        }
+            /* unregister the slot */
+            g_free(mem->dirty_bmap);
+            mem->dirty_bmap = NULL;
+            mem->memory_size = 0;
+            mem->flags = 0;
+            err = kvm_set_user_memory_region(kml, mem, false);
+            if (err) {
+                fprintf(stderr, "%s: error unregistering slot: %s\n",
+                        __func__, strerror(-err));
+                abort();
+            }
+            start_addr += slot_size;
+            size -= slot_size;
+        } while (size);
         goto out;
     }
 
     /* register the new slot */
-    mem = kvm_alloc_slot(kml);
-    mem->memory_size = size;
-    mem->start_addr = start_addr;
-    mem->ram = ram;
-    mem->flags = kvm_mem_flags(mr);
-
-    err = kvm_set_user_memory_region(kml, mem, true);
-    if (err) {
-        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
-                strerror(-err));
-        abort();
-    }
+    do {
+        slot_size = MIN(kvm_max_slot_size, size);
+        mem = kvm_alloc_slot(kml);
+        mem->memory_size = slot_size;
+        mem->start_addr = start_addr;
+        mem->ram = ram;
+        mem->flags = kvm_mem_flags(mr);
+
+        err = kvm_set_user_memory_region(kml, mem, true);
+        if (err) {
+            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
+                    strerror(-err));
+            abort();
+        }
+        start_addr += slot_size;
+        ram += slot_size;
+        size -= slot_size;
+    } while (size);
 
 out:
     kvm_slots_unlock(kml);
@@ -2859,6 +2913,7 @@ static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
 
     for (i = 0; i < kvm->nr_as; ++i) {
         if (kvm->as[i].as == as && kvm->as[i].ml) {
+            size = MIN(kvm_max_slot_size, size);
            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
                                                    start_addr, size);
        }
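
The consumer of kvm_set_max_memslot_size() is the s390 patch in this series ("s390/kvm: split kvm mem slots at 4TB"); that file is not shown in this view, so the following is only a hedged sketch of what such a call site looks like, with an assumed constant and function name:

#include <stdint.h>

typedef uint64_t hwaddr;                      /* stand-in for exec/hwaddr.h */
void kvm_set_max_memslot_size(hwaddr max_slot_size);

/* assumed 4 TiB constant; the real definition lives in target/s390x/kvm.c */
#define KVM_SLOT_MAX_BYTES (4ULL * 1024 * 1024 * 1024 * 1024)

/* called once from the architecture's KVM init path, before any memory
 * listener registers slots */
void s390_limit_memslots(void)
{
    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
}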
2 changes: 1 addition & 1 deletion configure
@@ -728,7 +728,7 @@ ARCH=
 # Normalise host CPU name and set ARCH.
 # Note that this case should only have supported host CPUs, not guests.
 case "$cpu" in
-  ppc|ppc64|s390|s390x|sparc64|x32|riscv32|riscv64)
+  ppc|ppc64|s390x|sparc64|x32|riscv32|riscv64)
     supported_cpu="yes"
   ;;
   ppc64le)
3 changes: 0 additions & 3 deletions hw/s390x/event-facility.c
@@ -377,9 +377,6 @@ static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code)
     case SCLP_CMD_WRITE_EVENT_MASK:
         write_event_mask(ef, sccb);
         break;
-    default:
-        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
-        break;
     }
 }

7 changes: 6 additions & 1 deletion hw/s390x/s390-pci-bus.c
@@ -695,10 +695,15 @@ static const MemoryRegionOps s390_msi_ctrl_ops = {
 
 void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
 {
+    /*
+     * The iommu region is initialized against a 0-mapped address space,
+     * so the smallest IOMMU region we can define runs from 0 to the end
+     * of the PCI address space.
+     */
     char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid);
     memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr),
                              TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr),
-                             name, iommu->pal - iommu->pba + 1);
+                             name, iommu->pal + 1);
     iommu->enabled = true;
     memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr));
     g_free(name);
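
Why pal + 1: the IOMMU region is anchored at guest IOVA 0, so its size must extend to the aperture limit pal even though translations only happen within [pba, pal]. A standalone illustration with made-up aperture values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical PCI aperture: base address / address limit */
    uint64_t pba = 0x40000000, pal = 0x7fffffff;

    /* old size ended at pal - pba, short of the aperture entirely */
    printf("old: size=0x%" PRIx64 " covers [0, 0x%" PRIx64 "]\n",
           pal - pba + 1, pal - pba);
    /* new size covers [0, pal], so [pba, pal] lies inside the region */
    printf("new: size=0x%" PRIx64 " covers [0, 0x%" PRIx64 "]\n",
           pal + 1, pal);
    return 0;
}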
