Skip to content

Commit

Permalink
Replace qemu_real_host_page variables with inlined functions
Browse files Browse the repository at this point in the history
Replace the global variables with inlined helper functions. getpagesize() is very
likely annotated with a "const" function attribute (at least with glibc), and thus
optimizations should apply even better.

This also avoids the need for constructor-time initialization.

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-Id: <20220323155743.1585078-12-marcandre.lureau@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
  • Loading branch information
elmarco authored and bonzini committed Apr 3, 2022
1 parent 742f238 commit 51f2287
Show file tree
Hide file tree
Showing 53 changed files with 150 additions and 162 deletions.
2 changes: 1 addition & 1 deletion accel/hvf/hvf-accel-ops.c
Expand Up @@ -122,7 +122,7 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
MemoryRegion *area = section->mr;
bool writeable = !area->readonly && !area->rom_device;
hv_memory_flags_t flags;
uint64_t page_size = qemu_real_host_page_size;
uint64_t page_size = qemu_real_host_page_size();

if (!memory_region_is_ram(area)) {
if (writeable) {
Expand Down
20 changes: 10 additions & 10 deletions accel/kvm/kvm-all.c
Expand Up @@ -59,7 +59,7 @@
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
Expand Down Expand Up @@ -324,14 +324,14 @@ static hwaddr kvm_align_section(MemoryRegionSection *section,
with sub-page size and unaligned start address. Pad the start
address to next and truncate size to previous page boundary. */
aligned = ROUND_UP(section->offset_within_address_space,
qemu_real_host_page_size);
qemu_real_host_page_size());
delta = aligned - section->offset_within_address_space;
*start = aligned;
if (delta > size) {
return 0;
}

return (size - delta) & qemu_real_host_page_mask;
return (size - delta) & qemu_real_host_page_mask();
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
Expand Down Expand Up @@ -626,7 +626,7 @@ static void kvm_log_stop(MemoryListener *listener,
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
ram_addr_t start = slot->ram_start_offset;
ram_addr_t pages = slot->memory_size / qemu_real_host_page_size;
ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}
Expand Down Expand Up @@ -662,7 +662,7 @@ static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
* And mem->memory_size is aligned to it (otherwise this mem can't
* be registered to KVM).
*/
hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size,
hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
/*HOST_LONG_BITS*/ 64) / 8;
mem->dirty_bmap = g_malloc0(bitmap_size);
mem->dirty_bmap_size = bitmap_size;
Expand Down Expand Up @@ -707,7 +707,7 @@ static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
mem = &kml->slots[slot_id];

if (!mem->memory_size || offset >=
(mem->memory_size / qemu_real_host_page_size)) {
(mem->memory_size / qemu_real_host_page_size())) {
return;
}

Expand Down Expand Up @@ -895,7 +895,7 @@ static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT 6
#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)

static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
Expand All @@ -904,7 +904,7 @@ static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
KVMState *s = kvm_state;
uint64_t end, bmap_start, start_delta, bmap_npages;
struct kvm_clear_dirty_log d;
unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
int ret;

/*
Expand Down Expand Up @@ -1335,7 +1335,7 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
g_assert(
ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
);
kvm_max_slot_size = max_slot_size;
}
Expand Down Expand Up @@ -2341,7 +2341,7 @@ static int kvm_init(MachineState *ms)
* even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
* page size for the system though.
*/
assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size);
assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());

s->sigmask_len = 8;

Expand Down
2 changes: 1 addition & 1 deletion backends/hostmem.c
Expand Up @@ -319,7 +319,7 @@ size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
#else
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{
return qemu_real_host_page_size;
return qemu_real_host_page_size();
}
#endif

Expand Down
4 changes: 2 additions & 2 deletions block.c
Expand Up @@ -135,7 +135,7 @@ size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
if (!bs || !bs->drv) {
/* page size or 4k (hdd sector size) should be on the safe side */
return MAX(4096, qemu_real_host_page_size);
return MAX(4096, qemu_real_host_page_size());
}
IO_CODE();

Expand All @@ -146,7 +146,7 @@ size_t bdrv_min_mem_align(BlockDriverState *bs)
{
if (!bs || !bs->drv) {
/* page size or 4k (hdd sector size) should be on the safe side */
return MAX(4096, qemu_real_host_page_size);
return MAX(4096, qemu_real_host_page_size());
}
IO_CODE();

Expand Down
6 changes: 3 additions & 3 deletions block/file-posix.c
Expand Up @@ -386,7 +386,7 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
{
BDRVRawState *s = bs->opaque;
char *buf;
size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size);
size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
size_t alignments[] = {1, 512, 1024, 2048, 4096};

/* For SCSI generic devices the alignment is not really used.
Expand Down Expand Up @@ -1261,7 +1261,7 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
raw_probe_alignment(bs, s->fd, errp);

bs->bl.min_mem_alignment = s->buf_align;
bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size);
bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size());

/*
* Maximum transfers are best effort, so it is okay to ignore any
Expand Down Expand Up @@ -1886,7 +1886,7 @@ static int allocate_first_block(int fd, size_t max_size)
size_t write_size = (max_size < MAX_BLOCKSIZE)
? BDRV_SECTOR_SIZE
: MAX_BLOCKSIZE;
size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size);
size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
void *buf;
ssize_t n;
int ret;
Expand Down
2 changes: 1 addition & 1 deletion block/io.c
Expand Up @@ -201,7 +201,7 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)

if (!have_limits) {
bs->bl.min_mem_alignment = 512;
bs->bl.opt_mem_alignment = qemu_real_host_page_size;
bs->bl.opt_mem_alignment = qemu_real_host_page_size();

/* Safe default since most protocols use readv()/writev()/etc */
bs->bl.max_iov = IOV_MAX;
Expand Down
22 changes: 11 additions & 11 deletions block/nvme.c
Expand Up @@ -169,9 +169,9 @@ static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
size_t bytes;
int r;

bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size);
bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size());
q->head = q->tail = 0;
q->queue = qemu_try_memalign(qemu_real_host_page_size, bytes);
q->queue = qemu_try_memalign(qemu_real_host_page_size(), bytes);
if (!q->queue) {
error_setg(errp, "Cannot allocate queue");
return false;
Expand Down Expand Up @@ -232,8 +232,8 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
trace_nvme_create_queue_pair(idx, q, size, aio_context,
event_notifier_get_fd(s->irq_notifier));
bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
qemu_real_host_page_size);
q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes);
qemu_real_host_page_size());
q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size(), bytes);
if (!q->prp_list_pages) {
error_setg(errp, "Cannot allocate PRP page list");
goto fail;
Expand Down Expand Up @@ -533,9 +533,9 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
.opcode = NVME_ADM_CMD_IDENTIFY,
.cdw10 = cpu_to_le32(0x1),
};
size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size);
size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size());

id = qemu_try_memalign(qemu_real_host_page_size, id_size);
id = qemu_try_memalign(qemu_real_host_page_size(), id_size);
if (!id) {
error_setg(errp, "Cannot allocate buffer for identify response");
goto out;
Expand Down Expand Up @@ -1048,7 +1048,7 @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
bool retry = true;
uint64_t iova;
size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
qemu_real_host_page_size);
qemu_real_host_page_size());
try_map:
r = qemu_vfio_dma_map(s->vfio,
qiov->iov[i].iov_base,
Expand Down Expand Up @@ -1224,8 +1224,8 @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs,

for (i = 0; i < qiov->niov; ++i) {
if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
qemu_real_host_page_size) ||
!QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
qemu_real_host_page_size()) ||
!QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size())) {
trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
qiov->iov[i].iov_len, s->page_size);
return false;
Expand All @@ -1241,7 +1241,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
int r;
QEMU_AUTO_VFREE uint8_t *buf = NULL;
QEMUIOVector local_qiov;
size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size());
assert(QEMU_IS_ALIGNED(offset, s->page_size));
assert(QEMU_IS_ALIGNED(bytes, s->page_size));
assert(bytes <= s->max_transfer);
Expand All @@ -1251,7 +1251,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
}
s->stats.unaligned_accesses++;
trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
buf = qemu_try_memalign(qemu_real_host_page_size, len);
buf = qemu_try_memalign(qemu_real_host_page_size(), len);

if (!buf) {
return -ENOMEM;
Expand Down
2 changes: 1 addition & 1 deletion block/parallels.c
Expand Up @@ -870,7 +870,7 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
}
}

s->bat_dirty_block = 4 * qemu_real_host_page_size;
s->bat_dirty_block = 4 * qemu_real_host_page_size();
s->bat_dirty_bmap =
bitmap_new(DIV_ROUND_UP(s->header_size, s->bat_dirty_block));

Expand Down
2 changes: 1 addition & 1 deletion block/qcow2-cache.c
Expand Up @@ -75,7 +75,7 @@ static void qcow2_cache_table_release(Qcow2Cache *c, int i, int num_tables)
/* Using MADV_DONTNEED to discard memory is a Linux-specific feature */
#ifdef CONFIG_LINUX
void *t = qcow2_cache_get_table_addr(c, i);
int align = qemu_real_host_page_size;
int align = qemu_real_host_page_size();
size_t mem_size = (size_t) c->table_size * num_tables;
size_t offset = QEMU_ALIGN_UP((uintptr_t) t, align) - (uintptr_t) t;
size_t length = QEMU_ALIGN_DOWN(mem_size - offset, align);
Expand Down
2 changes: 1 addition & 1 deletion bsd-user/elfload.c
Expand Up @@ -246,7 +246,7 @@ static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
* patch target_mmap(), but it is more complicated as the file
* size must be known.
*/
if (qemu_real_host_page_size < qemu_host_page_size) {
if (qemu_real_host_page_size() < qemu_host_page_size) {
abi_ulong end_addr, end_addr1;
end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
end_addr = HOST_PAGE_ALIGN(elf_bss);
Expand Down
2 changes: 1 addition & 1 deletion bsd-user/mmap.c
Expand Up @@ -515,7 +515,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
* up to the targets page boundary.
*/

if ((qemu_real_host_page_size < qemu_host_page_size) && fd != -1) {
if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
struct stat sb;

if (fstat(fd, &sb) == -1) {
Expand Down
2 changes: 1 addition & 1 deletion contrib/vhost-user-gpu/vugbm.c
Expand Up @@ -53,7 +53,7 @@ struct udmabuf_create {
static size_t
udmabuf_get_size(struct vugbm_buffer *buf)
{
return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size);
return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size());
}

static bool
Expand Down
2 changes: 1 addition & 1 deletion cpu.c
Expand Up @@ -481,7 +481,7 @@ void page_size_init(void)
/* NOTE: we can always suppose that qemu_host_page_size >=
TARGET_PAGE_SIZE */
if (qemu_host_page_size == 0) {
qemu_host_page_size = qemu_real_host_page_size;
qemu_host_page_size = qemu_real_host_page_size();
}
if (qemu_host_page_size < TARGET_PAGE_SIZE) {
qemu_host_page_size = TARGET_PAGE_SIZE;
Expand Down
2 changes: 1 addition & 1 deletion hw/display/qxl.c
Expand Up @@ -320,7 +320,7 @@ static ram_addr_t qxl_rom_size(void)
#define QXL_ROM_SZ 8192

QEMU_BUILD_BUG_ON(QXL_REQUIRED_SZ > QXL_ROM_SZ);
return QEMU_ALIGN_UP(QXL_REQUIRED_SZ, qemu_real_host_page_size);
return QEMU_ALIGN_UP(QXL_REQUIRED_SZ, qemu_real_host_page_size());
}

static void init_qxl_rom(PCIQXLDevice *d)
Expand Down
2 changes: 1 addition & 1 deletion hw/intc/s390_flic_kvm.c
Expand Up @@ -24,7 +24,7 @@
#include "trace.h"
#include "qom/object.h"

#define FLIC_SAVE_INITIAL_SIZE qemu_real_host_page_size
#define FLIC_SAVE_INITIAL_SIZE qemu_real_host_page_size()
#define FLIC_FAILED (-1UL)
#define FLIC_SAVEVM_VERSION 1

Expand Down
6 changes: 3 additions & 3 deletions hw/nvram/fw_cfg.c
Expand Up @@ -622,9 +622,9 @@ static bool fw_cfg_acpi_mr_restore(void *opaque)
FWCfgState *s = opaque;
bool mr_aligned;

mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size) &&
QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size) &&
QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size);
mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size()) &&
QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size()) &&
QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size());
return s->acpi_mr_restore && !mr_aligned;
}

Expand Down
2 changes: 1 addition & 1 deletion hw/ppc/mac_newworld.c
Expand Up @@ -456,7 +456,7 @@ static void ppc_core99_init(MachineState *machine)
}

/* The NewWorld NVRAM is not located in the MacIO device */
if (kvm_enabled() && qemu_real_host_page_size > 4096) {
if (kvm_enabled() && qemu_real_host_page_size() > 4096) {
/* We can't combine read-write and read-only in a single page, so
move the NVRAM out of ROM again for KVM */
nvram_addr = 0xFFE00000;
Expand Down
2 changes: 1 addition & 1 deletion hw/ppc/spapr_pci.c
Expand Up @@ -1978,7 +1978,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
* our memory slot is of page size granularity.
*/
if (kvm_enabled()) {
msi_window_size = qemu_real_host_page_size;
msi_window_size = qemu_real_host_page_size();
}

memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, spapr,
Expand Down
2 changes: 1 addition & 1 deletion hw/rdma/vmw/pvrdma_main.c
Expand Up @@ -608,7 +608,7 @@ static void pvrdma_realize(PCIDevice *pdev, Error **errp)
rdma_info_report("Initializing device %s %x.%x", pdev->name,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

if (TARGET_PAGE_SIZE != qemu_real_host_page_size) {
if (TARGET_PAGE_SIZE != qemu_real_host_page_size()) {
error_setg(errp, "Target page size must be the same as host page size");
return;
}
Expand Down
2 changes: 1 addition & 1 deletion hw/scsi/scsi-generic.c
Expand Up @@ -183,7 +183,7 @@ static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len)
uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk);

assert(max_transfer);
max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size)
max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size())
/ s->blocksize;
stl_be_p(&r->buf[8], max_transfer);
/* Also take care of the opt xfer len. */
Expand Down
2 changes: 1 addition & 1 deletion hw/tpm/tpm_ppi.c
Expand Up @@ -47,7 +47,7 @@ void tpm_ppi_reset(TPMPPI *tpmppi)
void tpm_ppi_init(TPMPPI *tpmppi, MemoryRegion *m,
hwaddr addr, Object *obj)
{
tpmppi->buf = qemu_memalign(qemu_real_host_page_size,
tpmppi->buf = qemu_memalign(qemu_real_host_page_size(),
HOST_PAGE_ALIGN(TPM_PPI_ADDR_SIZE));
memory_region_init_ram_device_ptr(&tpmppi->ram, obj, "tpm-ppi",
TPM_PPI_ADDR_SIZE, tpmppi->buf);
Expand Down

0 comments on commit 51f2287

Please sign in to comment.