accel/tcg: Introduce tlb_set_page_full
Now that we have collected all of the page data into
CPUTLBEntryFull, provide an interface to record that
all in one go, instead of using 4 arguments.  This interface
allows CPUTLBEntryFull to be extended without having to
change the number of arguments.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
rth7680 committed Oct 4, 2022
1 parent af803a4 commit 4047368
Show file tree
Hide file tree
Showing 3 changed files with 69 additions and 18 deletions.
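
To see what the new interface buys, here is an illustrative before/after of the same mapping through both entry points. The caller variables (cs, vaddr, paddr, mmu_idx) and the chosen prot bits are assumptions for the sketch, not part of the commit; MEMTXATTRS_UNSPECIFIED and the PAGE_* bits are existing QEMU names.

/* Illustrative sketch, not commit code: one page mapped both ways. */
static void map_one_page_demo(CPUState *cs, target_ulong vaddr,
                              hwaddr paddr, int mmu_idx)
{
    /* Old style: each page property is a separate argument. */
    tlb_set_page_with_attrs(cs, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            PAGE_READ | PAGE_EXEC, mmu_idx,
                            TARGET_PAGE_SIZE);

    /* New style: one descriptor; fields can be added to
     * CPUTLBEntryFull later without touching this call site. */
    CPUTLBEntryFull full = {
        .phys_addr = paddr,
        .attrs = MEMTXATTRS_UNSPECIFIED,
        .prot = PAGE_READ | PAGE_EXEC,
        .lg_page_size = TARGET_PAGE_BITS,
    };
    tlb_set_page_full(cs, mmu_idx, vaddr, &full);
}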
51 changes: 33 additions & 18 deletions accel/tcg/cputlb.c
@@ -1095,16 +1095,16 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
 }
 
-/* Add a new TLB entry. At most one entry for a given virtual address
+/*
+ * Add a new TLB entry. At most one entry for a given virtual address
  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
  * supplied size is only used by tlb_flush_page.
  *
  * Called from TCG-generated code, which is under an RCU read-side
  * critical section.
  */
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
-                             hwaddr paddr, MemTxAttrs attrs, int prot,
-                             int mmu_idx, target_ulong size)
+void tlb_set_page_full(CPUState *cpu, int mmu_idx,
+                       target_ulong vaddr, CPUTLBEntryFull *full)
 {
     CPUArchState *env = cpu->env_ptr;
     CPUTLB *tlb = env_tlb(env);
@@ -1117,35 +1117,36 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     CPUTLBEntry *te, tn;
     hwaddr iotlb, xlat, sz, paddr_page;
     target_ulong vaddr_page;
-    int asidx = cpu_asidx_from_attrs(cpu, attrs);
-    int wp_flags;
+    int asidx, wp_flags, prot;
     bool is_ram, is_romd;
 
     assert_cpu_is_self(cpu);
 
-    if (size <= TARGET_PAGE_SIZE) {
+    if (full->lg_page_size <= TARGET_PAGE_BITS) {
         sz = TARGET_PAGE_SIZE;
     } else {
-        tlb_add_large_page(env, mmu_idx, vaddr, size);
-        sz = size;
+        sz = (hwaddr)1 << full->lg_page_size;
+        tlb_add_large_page(env, mmu_idx, vaddr, sz);
     }
     vaddr_page = vaddr & TARGET_PAGE_MASK;
-    paddr_page = paddr & TARGET_PAGE_MASK;
+    paddr_page = full->phys_addr & TARGET_PAGE_MASK;
 
+    prot = full->prot;
+    asidx = cpu_asidx_from_attrs(cpu, full->attrs);
     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
-                                                &xlat, &sz, attrs, &prot);
+                                                &xlat, &sz, full->attrs, &prot);
     assert(sz >= TARGET_PAGE_SIZE);
 
     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
               " prot=%x idx=%d\n",
-              vaddr, paddr, prot, mmu_idx);
+              vaddr, full->phys_addr, prot, mmu_idx);
 
     address = vaddr_page;
-    if (size < TARGET_PAGE_SIZE) {
+    if (full->lg_page_size < TARGET_PAGE_BITS) {
         /* Repeat the MMU check and TLB fill on every access. */
         address |= TLB_INVALID_MASK;
     }
-    if (attrs.byte_swap) {
+    if (full->attrs.byte_swap) {
         address |= TLB_BSWAP;
     }
 
@@ -1236,8 +1237,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
      * subtract here is that of the page base, and not the same as the
      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
      */
+    desc->fulltlb[index] = *full;
     desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
-    desc->fulltlb[index].attrs = attrs;
+    desc->fulltlb[index].phys_addr = paddr_page;
+    desc->fulltlb[index].prot = prot;
 
     /* Now calculate the new entry */
     tn.addend = addend - vaddr_page;
@@ -1272,9 +1275,21 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     qemu_spin_unlock(&tlb->c.lock);
 }
 
-/* Add a new TLB entry, but without specifying the memory
- * transaction attributes to be used.
- */
+void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+                             hwaddr paddr, MemTxAttrs attrs, int prot,
+                             int mmu_idx, target_ulong size)
+{
+    CPUTLBEntryFull full = {
+        .phys_addr = paddr,
+        .attrs = attrs,
+        .prot = prot,
+        .lg_page_size = ctz64(size)
+    };
+
+    assert(is_power_of_2(size));
+    tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
+}
+
 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                   hwaddr paddr, int prot,
                   int mmu_idx, target_ulong size)
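A note on the wrapper above: lg_page_size = ctz64(size) is only a valid log2 when size is a power of two, which is exactly what the new assert(is_power_of_2(size)) guards. A standalone illustration follows, using the compiler builtin rather than QEMU's ctz64; the values assume the common 4 KiB TARGET_PAGE_SIZE.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* For powers of two, count-trailing-zeros equals log2. */
    assert(__builtin_ctzll((uint64_t)0x1000) == 12);    /* 4 KiB -> 12 */
    assert(__builtin_ctzll((uint64_t)0x200000) == 21);  /* 2 MiB -> 21 */

    /* A non-power-of-two such as 0x3000 would also yield 12, silently
     * losing the extra 8 KiB, hence the is_power_of_2 assertion. */
    return 0;
}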
14 changes: 14 additions & 0 deletions include/exec/cpu-defs.h
@@ -148,7 +148,21 @@ typedef struct CPUTLBEntryFull {
      * + the offset within the target MemoryRegion (otherwise)
      */
     hwaddr xlat_section;
+
+    /*
+     * @phys_addr contains the physical address in the address space
+     * given by cpu_asidx_from_attrs(cpu, @attrs).
+     */
+    hwaddr phys_addr;
+
+    /* @attrs contains the memory transaction attributes for the page. */
     MemTxAttrs attrs;
+
+    /* @prot contains the complete protections for the page. */
+    uint8_t prot;
+
+    /* @lg_page_size contains the log2 of the page size. */
+    uint8_t lg_page_size;
 } CPUTLBEntryFull;
 
 /*
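For orientation, @prot holds QEMU's existing PAGE_* permission bits and @lg_page_size is a log2, so both decode with one-liners. A hedged sketch; the helper names are invented for illustration, not part of the commit:

/* Hypothetical helpers, for illustration only. */
static inline bool full_allows_write(const CPUTLBEntryFull *full)
{
    return (full->prot & PAGE_WRITE) != 0;
}

static inline uint64_t full_page_bytes(const CPUTLBEntryFull *full)
{
    return (uint64_t)1 << full->lg_page_size;  /* log2 back to bytes */
}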
22 changes: 22 additions & 0 deletions include/exec/exec-all.h
@@ -257,6 +257,28 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                uint16_t idxmap,
                                                unsigned bits);
 
+/**
+ * tlb_set_page_full:
+ * @cpu: CPU context
+ * @mmu_idx: mmu index of the tlb to modify
+ * @vaddr: virtual address of the entry to add
+ * @full: the details of the tlb entry
+ *
+ * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
+ * @full must be filled, except for xlat_section, and constitute
+ * the complete description of the translated page.
+ *
+ * This is generally called by the target tlb_fill function after
+ * having performed a successful page table walk to find the physical
+ * address and attributes for the translation.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
+                       CPUTLBEntryFull *full);
+
 /**
  * tlb_set_page_with_attrs:
  * @cpu: CPU to add this TLB entry for
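Since the comment above says tlb_set_page_full is generally called from a target's tlb_fill hook, here is a minimal sketch of that shape. The walker my_target_walk() and its outputs are invented for illustration, and xlat_section is deliberately left unset, as the documentation requires:

static bool my_target_tlb_fill(CPUState *cs, target_ulong vaddr, int size,
                               MMUAccessType access_type, int mmu_idx,
                               bool probe, uintptr_t retaddr)
{
    hwaddr phys;
    int prot;
    unsigned lg_page_size;

    /* Hypothetical page-table walk producing phys/prot/lg_page_size. */
    if (!my_target_walk(cs, vaddr, access_type, mmu_idx,
                        &phys, &prot, &lg_page_size)) {
        return false;  /* a real target would raise an exception here */
    }

    CPUTLBEntryFull full = {
        .phys_addr = phys,
        .attrs = MEMTXATTRS_UNSPECIFIED,
        .prot = prot,                   /* PAGE_READ/WRITE/EXEC bits */
        .lg_page_size = lg_page_size,   /* e.g. 12 for a 4 KiB page */
        /* .xlat_section is computed by tlb_set_page_full itself. */
    };
    tlb_set_page_full(cs, mmu_idx, vaddr, &full);
    return true;
}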
