translate-all.c: Compute L1 page table properties at runtime
Stop computing the L1 page mapping table properties
statically with macros, which makes them depend on
TARGET_PAGE_BITS. Drop the V_L1_SIZE, V_L1_SHIFT and
V_L1_BITS macros and replace them with variables that are
computed at an early stage of VM boot.

Removing this dependency helps make TARGET_PAGE_BITS
dynamic.

Signed-off-by: Vijaya Kumar K <vijayak@cavium.com>
Message-id: 1465808915-4887-4-git-send-email-vijayak@caviumnetworks.com
[PMM:
 assert(v_l1_shift % V_L2_BITS == 0)
 cache v_l2_levels
 initialize from page_init() rather than vl.c
 minor code style fixes
 put v_l1_size into a local where used as a loop limit]
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
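
For illustration, here is a minimal standalone sketch of the computation
the commit moves to runtime. The numeric inputs (TARGET_PAGE_BITS = 12,
i.e. 4 KiB target pages, and L1_MAP_ADDR_SPACE_BITS = 32) are assumptions
chosen for this example, not values taken from the commit.

/* Example-only sketch; the macros mirror translate-all.c, but the
 * numeric values below are assumptions for illustration. */
#include <assert.h>
#include <stdio.h>

#define TARGET_PAGE_BITS        12   /* assumed: 4 KiB target pages */
#define L1_MAP_ADDR_SPACE_BITS  32   /* assumed address-space width */
#define V_L2_BITS               10
#define V_L1_MIN_BITS           4
#define V_L1_MAX_BITS           (V_L2_BITS + 3)

int main(void)
{
    /* Bits left over after as many full V_L2_BITS levels as possible. */
    int v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;       /* fold one whole level into L1 */
    }

    int v_l1_size = 1 << v_l1_bits;
    int v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    int v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);

    /* With the assumed inputs: 32 - 12 = 20 bits to cover, 20 % 10 = 0,
     * so v_l1_bits = 10, v_l1_size = 1024, v_l1_shift = 10, v_l2_levels = 0. */
    printf("l1 size %d, shift %d, l2 levels %d\n",
           v_l1_size, v_l1_shift, v_l2_levels);
    return 0;
}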
Vijaya Kumar K authored and pm215 committed Oct 24, 2016
1 parent 2615fab commit 66ec9f4
Showing 1 changed file with 46 additions and 25 deletions.
71 changes: 46 additions & 25 deletions translate-all.c
@@ -97,25 +97,24 @@ typedef struct PageDesc {
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables. */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS V_L1_BITS_REM
#endif

#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];
/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_ctx;
@@ -125,6 +124,26 @@ TCGContext tcg_ctx;
__thread int have_tb_lock;
#endif

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables. */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
@@ -332,6 +351,8 @@ void page_size_init(void)
static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
@@ -408,10 +429,10 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
    int i;

    /* Level 1. Always allocated. */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1. */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
@@ -826,10 +847,10 @@ static void page_flush_tb_1(int level, void **lp)

static void page_flush_tb(void)
{
    int i;
    int i, l1_sz = v_l1_size;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

@@ -1883,16 +1904,16 @@ static int walk_memory_regions_1(struct walk_memory_regions_data *data,
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
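As a usage illustration of the new variables, here is a hypothetical,
simplified read-only lookup modelled on the page_find_alloc() walk shown
above; it is a sketch under those assumptions, not the QEMU implementation,
and it omits allocation, locking and atomic_rcu_read().

/* Hypothetical, simplified walk over an l1_map-style radix tree;
 * names mirror the commit, but this is illustration-only code. */
#include <stddef.h>

#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

void *lookup_page_desc(void **l1_map, unsigned long index,
                       int v_l1_shift, int v_l1_size, int v_l2_levels)
{
    /* Level 1: index the fixed top-level array with the uppermost bits. */
    void **lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Levels 2..N-1: each level resolves another V_L2_BITS of the index. */
    for (int i = v_l2_levels; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            return NULL;              /* nothing mapped under this subtree */
        }
        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }
    return *lp;                       /* PageDesc pointer, or NULL */
}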
