Skip to content

Commit

Permalink
Merge tag 'pull-tcg-20230731' of https://gitlab.com/rth7680/qemu into…
Browse files Browse the repository at this point in the history
… staging

util/interval-tree: Access left/right/parent atomically
accel/tcg: Clear gen_tb on buffer overflow
bsd-user: Specify host page alignment if none specified
bsd-user: Allocate guest virtual address space
target/ppc: Disable goto_tb with architectural singlestep
target/s390x: Move trans_exc_code update to do_program_interrupt

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmTIIQUdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV87JAf/ZgJTq26oniJ4TLkS
# 2UVBEcxGnnA2L1n4zcXG1o0onT5dAqm/6YjSlVD7C+Ol8pzQMomJKcWLL/jrCEUp
# rQXPV9ibD5bCtO47MY3ZS3aW3pqOhXOeKUFer1+YHWRRyi9Y6kEx0d2No3MSGo18
# S5A6zPwqduQvZPBPVualmtdIrpTasxhUdNfbqBW31pxYpCNg1wqIiwKoLcD5NJeX
# epVhaUi/7TwqljrK7SGXmmfDWiTHIXDtvPrJQcSYGgqpVNFzRuq6jTXRJObeWen0
# DhOHqC0Z6OkZ2gU+eso/VRbcbawQNQohUHQzZ7c0643TxncPDKG82/MDRe2MTJnq
# /z+jpw==
# =Z8UY
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 31 Jul 2023 02:00:53 PM PDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-tcg-20230731' of https://gitlab.com/rth7680/qemu:
  target/s390x: Move trans_exc_code update to do_program_interrupt
  linux-user/armeb: Fix __kernel_cmpxchg() for armeb
  target/ppc: Disable goto_tb with architectural singlestep
  bsd-user: Specify host page alignment if none specified
  bsd-user: Allocate guest virtual address space
  accel/tcg: Clear tcg_ctx->gen_tb on buffer overflow
  util/interval-tree: Use qatomic_read/set for rb_parent_color
  util/interval-tree: Introduce pc_parent
  util/interval-tree: Use qatomic_set_mb in rb_link_node
  util/interval-tree: Use qatomic_read for left/right while searching

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
  • Loading branch information
rth7680 committed Jul 31, 2023
2 parents 234320c + 8b94ec5 commit 8023418
Show file tree
Hide file tree
Showing 7 changed files with 132 additions and 51 deletions.
1 change: 1 addition & 0 deletions accel/tcg/translate-all.c
Original file line number Diff line number Diff line change
Expand Up @@ -374,6 +374,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
"Restarting code generation for "
"code_gen_buffer overflow\n");
tb_unlock_pages(tb);
tcg_ctx->gen_tb = NULL;
goto buffer_overflow;

case -2:
Expand Down
48 changes: 43 additions & 5 deletions bsd-user/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -473,10 +473,6 @@ int main(int argc, char **argv)
target_environ = envlist_to_environ(envlist, NULL);
envlist_free(envlist);

if (reserved_va) {
mmap_next_start = reserved_va + 1;
}

{
Error *err = NULL;
if (seed_optarg != NULL) {
Expand All @@ -494,7 +490,49 @@ int main(int argc, char **argv)
* Now that page sizes are configured we can do
* proper page alignment for guest_base.
*/
guest_base = HOST_PAGE_ALIGN(guest_base);
if (have_guest_base) {
if (guest_base & ~qemu_host_page_mask) {
error_report("Selected guest base not host page aligned");
exit(1);
}
}

/*
* If reserving host virtual address space, do so now.
* Combined with '-B', ensure that the chosen range is free.
*/
if (reserved_va) {
void *p;

if (have_guest_base) {
p = mmap((void *)guest_base, reserved_va + 1, PROT_NONE,
MAP_ANON | MAP_PRIVATE | MAP_FIXED | MAP_EXCL, -1, 0);
} else {
p = mmap(NULL, reserved_va + 1, PROT_NONE,
MAP_ANON | MAP_PRIVATE, -1, 0);
}
if (p == MAP_FAILED) {
const char *err = strerror(errno);
char *sz = size_to_str(reserved_va + 1);

if (have_guest_base) {
error_report("Cannot allocate %s bytes at -B %p for guest "
"address space: %s", sz, (void *)guest_base, err);
} else {
error_report("Cannot allocate %s bytes for guest "
"address space: %s", sz, err);
}
exit(1);
}
guest_base = (uintptr_t)p;
have_guest_base = true;

/* Ensure that mmap_next_start is within range. */
if (reserved_va <= mmap_next_start) {
mmap_next_start = (reserved_va / 4 * 3)
& TARGET_PAGE_MASK & qemu_host_page_mask;
}
}

if (loader_exec(filename, argv + optind, target_environ, regs, info,
&bprm) != 0) {
Expand Down
3 changes: 2 additions & 1 deletion bsd-user/mmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,8 @@ static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,

if (reserved_va) {
return mmap_find_vma_reserved(start, size,
(alignment != 0 ? 1 << alignment : 0));
(alignment != 0 ? 1 << alignment :
MAX(qemu_host_page_size, TARGET_PAGE_SIZE)));
}

addr = start;
Expand Down
9 changes: 7 additions & 2 deletions linux-user/arm/cpu_loop.c
Original file line number Diff line number Diff line change
Expand Up @@ -117,8 +117,9 @@ static void arm_kernel_cmpxchg32_helper(CPUARMState *env)
{
uint32_t oldval, newval, val, addr, cpsr, *host_addr;

oldval = env->regs[0];
newval = env->regs[1];
/* Swap if host != guest endianness, for the host cmpxchg below */
oldval = tswap32(env->regs[0]);
newval = tswap32(env->regs[1]);
addr = env->regs[2];

mmap_lock();
Expand Down Expand Up @@ -174,6 +175,10 @@ static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
return;
}

/* Swap if host != guest endianness, for the host cmpxchg below */
oldval = tswap64(oldval);
newval = tswap64(newval);

#ifdef CONFIG_ATOMIC64
val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
cpsr = (val == oldval) * CPSR_C;
Expand Down
3 changes: 3 additions & 0 deletions target/ppc/translate.c
Original file line number Diff line number Diff line change
Expand Up @@ -4175,6 +4175,9 @@ static void pmu_count_insns(DisasContext *ctx)

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    /*
     * With architectural single-step enabled we must return to the
     * main loop after every instruction, so direct TB chaining via
     * goto_tb is not permitted; otherwise defer to the common check.
     */
    if (likely(!ctx->singlestep_enabled)) {
        return translator_use_goto_tb(&ctx->base, dest);
    }
    return false;
}

Expand Down
40 changes: 28 additions & 12 deletions target/s390x/tcg/excp_helper.c
Original file line number Diff line number Diff line change
Expand Up @@ -190,11 +190,6 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
return false;
}

if (excp != PGM_ADDRESSING) {
stq_phys(env_cpu(env)->as,
env->psa + offsetof(LowCore, trans_exc_code), tec);
}

/*
* For data accesses, ILEN will be filled in from the unwind info,
* within cpu_loop_exit_restore. For code accesses, retaddr == 0,
Expand All @@ -211,20 +206,33 @@ static void do_program_interrupt(CPUS390XState *env)
uint64_t mask, addr;
LowCore *lowcore;
int ilen = env->int_pgm_ilen;
bool set_trans_exc_code = false;
bool advance = false;

assert((env->int_pgm_code == PGM_SPECIFICATION && ilen == 0) ||
ilen == 2 || ilen == 4 || ilen == 6);

switch (env->int_pgm_code) {
case PGM_PER:
if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
break;
}
/* FALL THROUGH */
advance = !(env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION);
break;
case PGM_ASCE_TYPE:
case PGM_REG_FIRST_TRANS:
case PGM_REG_SEC_TRANS:
case PGM_REG_THIRD_TRANS:
case PGM_SEGMENT_TRANS:
case PGM_PAGE_TRANS:
assert(env->int_pgm_code == env->tlb_fill_exc);
set_trans_exc_code = true;
break;
case PGM_PROTECTION:
assert(env->int_pgm_code == env->tlb_fill_exc);
set_trans_exc_code = true;
advance = true;
break;
case PGM_OPERATION:
case PGM_PRIVILEGED:
case PGM_EXECUTE:
case PGM_PROTECTION:
case PGM_ADDRESSING:
case PGM_SPECIFICATION:
case PGM_DATA:
Expand All @@ -243,11 +251,15 @@ static void do_program_interrupt(CPUS390XState *env)
case PGM_PC_TRANS_SPEC:
case PGM_ALET_SPEC:
case PGM_MONITOR:
/* advance the PSW if our exception is not nullifying */
env->psw.addr += ilen;
advance = true;
break;
}

/* advance the PSW if our exception is not nullifying */
if (advance) {
env->psw.addr += ilen;
}

qemu_log_mask(CPU_LOG_INT,
"%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
__func__, env->int_pgm_code, ilen, env->psw.mask,
Expand All @@ -263,6 +275,10 @@ static void do_program_interrupt(CPUS390XState *env)
env->per_perc_atmid = 0;
}

if (set_trans_exc_code) {
lowcore->trans_exc_code = cpu_to_be64(env->tlb_fill_tec);
}

lowcore->pgm_ilen = cpu_to_be16(ilen);
lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
Expand Down
79 changes: 48 additions & 31 deletions util/interval-tree.c
Original file line number Diff line number Diff line change
Expand Up @@ -48,12 +48,6 @@
*
* It also guarantees that if the lookup returns an element it is the 'correct'
* one. But not returning an element does _NOT_ mean it's not present.
*
* NOTE:
*
* Stores to __rb_parent_color are not important for simple lookups so those
* are left undone as of now. Nor did I check for loops involving parent
* pointers.
*/

typedef enum RBColor
Expand All @@ -68,14 +62,29 @@ typedef struct RBAugmentCallbacks {
void (*rotate)(RBNode *old, RBNode *new);
} RBAugmentCallbacks;

static inline uintptr_t rb_pc(const RBNode *n)
{
    /*
     * Atomically load the combined parent-pointer/color word so that
     * concurrent lockless readers never observe a torn value.
     */
    uintptr_t pc = qatomic_read(&n->rb_parent_color);
    return pc;
}

static inline void rb_set_pc(RBNode *n, uintptr_t pc)
{
    /*
     * Atomically publish a new parent-pointer/color word; paired with
     * the qatomic_read in rb_pc() for concurrent lockless lookups.
     */
    uintptr_t *slot = &n->rb_parent_color;
    qatomic_set(slot, pc);
}

static inline RBNode *pc_parent(uintptr_t pc)
{
    /*
     * Bit 0 of the word holds the node color; mask it off to recover
     * the parent pointer encoded in the remaining bits.
     */
    uintptr_t parent_bits = pc & ~(uintptr_t)1;
    return (RBNode *)parent_bits;
}

static inline RBNode *rb_parent(const RBNode *n)
{
return (RBNode *)(n->rb_parent_color & ~1);
return pc_parent(rb_pc(n));
}

static inline RBNode *rb_red_parent(const RBNode *n)
{
return (RBNode *)n->rb_parent_color;
return (RBNode *)rb_pc(n);
}

static inline RBColor pc_color(uintptr_t pc)
Expand All @@ -95,27 +104,27 @@ static inline bool pc_is_black(uintptr_t pc)

static inline RBColor rb_color(const RBNode *n)
{
return pc_color(n->rb_parent_color);
return pc_color(rb_pc(n));
}

static inline bool rb_is_red(const RBNode *n)
{
return pc_is_red(n->rb_parent_color);
return pc_is_red(rb_pc(n));
}

static inline bool rb_is_black(const RBNode *n)
{
return pc_is_black(n->rb_parent_color);
return pc_is_black(rb_pc(n));
}

static inline void rb_set_black(RBNode *n)
{
n->rb_parent_color |= RB_BLACK;
rb_set_pc(n, rb_pc(n) | RB_BLACK);
}

static inline void rb_set_parent_color(RBNode *n, RBNode *p, RBColor color)
{
n->rb_parent_color = (uintptr_t)p | color;
rb_set_pc(n, (uintptr_t)p | color);
}

static inline void rb_set_parent(RBNode *n, RBNode *p)
Expand All @@ -128,7 +137,11 @@ static inline void rb_link_node(RBNode *node, RBNode *parent, RBNode **rb_link)
node->rb_parent_color = (uintptr_t)parent;
node->rb_left = node->rb_right = NULL;

qatomic_set(rb_link, node);
/*
* Ensure that node is initialized before insertion,
* as viewed by a concurrent search.
*/
qatomic_set_mb(rb_link, node);
}

static RBNode *rb_next(RBNode *node)
Expand Down Expand Up @@ -177,9 +190,10 @@ static inline void rb_change_child(RBNode *old, RBNode *new,
static inline void rb_rotate_set_parents(RBNode *old, RBNode *new,
RBRoot *root, RBColor color)
{
RBNode *parent = rb_parent(old);
uintptr_t pc = rb_pc(old);
RBNode *parent = pc_parent(pc);

new->rb_parent_color = old->rb_parent_color;
rb_set_pc(new, pc);
rb_set_parent_color(old, new, color);
rb_change_child(old, new, parent, root);
}
Expand Down Expand Up @@ -527,21 +541,21 @@ static void rb_erase_augmented(RBNode *node, RBRoot *root,
* and node must be black due to 4). We adjust colors locally
* so as to bypass rb_erase_color() later on.
*/
pc = node->rb_parent_color;
parent = rb_parent(node);
pc = rb_pc(node);
parent = pc_parent(pc);
rb_change_child(node, child, parent, root);
if (child) {
child->rb_parent_color = pc;
rb_set_pc(child, pc);
rebalance = NULL;
} else {
rebalance = pc_is_black(pc) ? parent : NULL;
}
tmp = parent;
} else if (!child) {
/* Still case 1, but this time the child is node->rb_left */
pc = node->rb_parent_color;
parent = rb_parent(node);
tmp->rb_parent_color = pc;
pc = rb_pc(node);
parent = pc_parent(pc);
rb_set_pc(tmp, pc);
rb_change_child(node, tmp, parent, root);
rebalance = NULL;
tmp = parent;
Expand Down Expand Up @@ -595,8 +609,8 @@ static void rb_erase_augmented(RBNode *node, RBRoot *root,
qatomic_set(&successor->rb_left, tmp);
rb_set_parent(tmp, successor);

pc = node->rb_parent_color;
tmp = rb_parent(node);
pc = rb_pc(node);
tmp = pc_parent(pc);
rb_change_child(node, successor, tmp, root);

if (child2) {
Expand All @@ -605,7 +619,7 @@ static void rb_erase_augmented(RBNode *node, RBRoot *root,
} else {
rebalance = rb_is_black(successor) ? parent : NULL;
}
successor->rb_parent_color = pc;
rb_set_pc(successor, pc);
tmp = successor;
}

Expand Down Expand Up @@ -745,8 +759,9 @@ static IntervalTreeNode *interval_tree_subtree_search(IntervalTreeNode *node,
* Loop invariant: start <= node->subtree_last
* (Cond2 is satisfied by one of the subtree nodes)
*/
if (node->rb.rb_left) {
IntervalTreeNode *left = rb_to_itree(node->rb.rb_left);
RBNode *tmp = qatomic_read(&node->rb.rb_left);
if (tmp) {
IntervalTreeNode *left = rb_to_itree(tmp);

if (start <= left->subtree_last) {
/*
Expand All @@ -765,8 +780,9 @@ static IntervalTreeNode *interval_tree_subtree_search(IntervalTreeNode *node,
if (start <= node->last) { /* Cond2 */
return node; /* node is leftmost match */
}
if (node->rb.rb_right) {
node = rb_to_itree(node->rb.rb_right);
tmp = qatomic_read(&node->rb.rb_right);
if (tmp) {
node = rb_to_itree(tmp);
if (start <= node->subtree_last) {
continue;
}
Expand Down Expand Up @@ -814,8 +830,9 @@ IntervalTreeNode *interval_tree_iter_first(IntervalTreeRoot *root,
IntervalTreeNode *interval_tree_iter_next(IntervalTreeNode *node,
uint64_t start, uint64_t last)
{
RBNode *rb = node->rb.rb_right, *prev;
RBNode *rb, *prev;

rb = qatomic_read(&node->rb.rb_right);
while (true) {
/*
* Loop invariants:
Expand All @@ -840,7 +857,7 @@ IntervalTreeNode *interval_tree_iter_next(IntervalTreeNode *node,
}
prev = &node->rb;
node = rb_to_itree(rb);
rb = node->rb.rb_right;
rb = qatomic_read(&node->rb.rb_right);
} while (prev == rb);

/* Check if the node intersects [start;last] */
Expand Down

0 comments on commit 8023418

Please sign in to comment.