4 changes: 2 additions & 2 deletions target/arm/tcg/translate.c
@@ -2816,7 +2816,7 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
dc_isar_feature(aa64_sel2, s)) {
/* Target EL is EL<3 minus SCR_EL3.EEL2> */
tcg_el = load_cpu_field(cp15.scr_el3);
tcg_el = load_cpu_field_low32(cp15.scr_el3);
tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1);
tcg_gen_addi_i32(tcg_el, tcg_el, 3);
} else {
@@ -6396,7 +6396,7 @@ static bool trans_ERET(DisasContext *s, arg_ERET *a)
}
if (s->current_el == 2) {
/* ERET from Hyp uses ELR_Hyp, not LR */
tmp = load_cpu_field(elr_el[2]);
tmp = load_cpu_field_low32(elr_el[2]);
} else {
tmp = load_reg(s, 14);
}
7 changes: 7 additions & 0 deletions target/arm/translate-a32.h
@@ -61,6 +61,13 @@ static inline TCGv_i32 load_cpu_offset(int offset)

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

/* Load from the low half of a 64-bit field to a TCGv_i32 */
#define load_cpu_field_low32(name) \
({ \
QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 8); \
load_cpu_offset(offsetoflow32(CPUARMState, name)); \
})

void store_cpu_offset(TCGv_i32 var, int offset, int size);

#define store_cpu_field(var, name) \
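Note on the new helper: a plain load_cpu_field() of a 64-bit field into a TCGv_i32 would read whichever half of the field happens to sit first in host memory. offsetoflow32() resolves that; a minimal sketch of the idea, assuming a HOST_BIG_ENDIAN-style macro (illustrative, not the exact QEMU definition):

    /* Sketch only: the low 32 bits of a 64-bit field sit at a different
     * byte offset depending on host endianness. */
    #if HOST_BIG_ENDIAN
    #define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
    #else
    #define offsetoflow32(S, M) offsetof(S, M)
    #endif

The QEMU_BUILD_BUG_ON() additionally turns a use on a field that is not 8 bytes wide into a compile-time error rather than a silently wrong-sized load.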
4 changes: 2 additions & 2 deletions target/i386/cpu.c
@@ -5718,8 +5718,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
} else {
*eax &= env->features[FEAT_SGX_12_1_EAX];
*ebx &= 0; /* ebx reserve */
*ecx &= env->features[FEAT_XSAVE_XSS_LO];
*edx &= env->features[FEAT_XSAVE_XSS_HI];
*ecx &= env->features[FEAT_XSAVE_XCR0_LO];
*edx &= env->features[FEAT_XSAVE_XCR0_HI];

/* FP and SSE are always allowed regardless of XSAVE/XCR0. */
*ecx |= XSTATE_FP_MASK | XSTATE_SSE_MASK;
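The constant swap reflects what CPUID leaf 0x12, sub-leaf 1 reports: the SECS.ATTRIBUTES.XFRM bits an enclave may set, and XFRM mirrors XCR0 (user states), not the XSS MSR (supervisor states). A hedged sketch of the 64-bit mask that ECX/EDX encode here:

    /* Illustration only: ECX/EDX carry the low/high halves of the
     * permitted XFRM mask; FP and SSE are unconditionally allowed. */
    uint64_t xfrm_allowed =
        ((uint64_t)env->features[FEAT_XSAVE_XCR0_HI] << 32) |
        env->features[FEAT_XSAVE_XCR0_LO] |
        XSTATE_FP_MASK | XSTATE_SSE_MASK;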
8 changes: 8 additions & 0 deletions target/i386/ops_sse.h
@@ -2497,6 +2497,14 @@ void helper_vpermdq_ymm(Reg *d, Reg *v, Reg *s, uint32_t order)
d->Q(1) = r1;
d->Q(2) = r2;
d->Q(3) = r3;
if (order & 0x8) {
d->Q(0) = 0;
d->Q(1) = 0;
}
if (order & 0x80) {
d->Q(2) = 0;
d->Q(3) = 0;
}
}

void helper_vpermq_ymm(Reg *d, Reg *s, uint32_t order)
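The added stores implement the zeroing bits of the VPERM2x128-style immediate: bit 3 clears the low 128-bit half of the destination, bit 7 the high half. A hypothetical spot check of the semantics (lane values invented for illustration):

    /* order = 0x81: bits [1:0] = 1 pick the high lane of v for the low
     * half of d; bit 7 zeroes the high half of d. */
    Reg d, v, s;
    v.Q(2) = 0x3333; v.Q(3) = 0x4444;   /* remaining lanes irrelevant */
    helper_vpermdq_ymm(&d, &v, &s, 0x81);
    /* now d.Q(0) == 0x3333, d.Q(1) == 0x4444, d.Q(2) == d.Q(3) == 0 */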
15 changes: 13 additions & 2 deletions target/i386/tcg/decode-new.c.inc
@@ -783,6 +783,17 @@ static void decode_0F2D(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
*entry = *decode_by_prefix(s, opcodes_0F2D);
}

static void decode_VxCOMISx(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{
/*
* VUCOMISx and VCOMISx are different and use no-prefix and 0x66 for SS and SD
* respectively. Scalar values usually are associated with 0xF2 and 0xF3, for
* which X86_VEX_REPScalar exists, but here it has to be decoded by hand.
*/
entry->s1 = entry->s2 = (s->prefix & PREFIX_DATA ? X86_SIZE_sd : X86_SIZE_ss);
entry->gen = (*b == 0x2E ? gen_VUCOMI : gen_VCOMI);
}

static void decode_sse_unary(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{
if (!(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))) {
@@ -871,8 +882,8 @@ static const X86OpEntry opcodes_0F[256] = {
[0x2B] = X86_OP_GROUP0(0F2B),
[0x2C] = X86_OP_GROUP0(0F2C),
[0x2D] = X86_OP_GROUP0(0F2D),
[0x2E] = X86_OP_ENTRY3(VUCOMI, None,None, V,x, W,x, vex4 p_00_66),
[0x2F] = X86_OP_ENTRY3(VCOMI, None,None, V,x, W,x, vex4 p_00_66),
[0x2E] = X86_OP_GROUP3(VxCOMISx, None,None, V,x, W,x, vex3 p_00_66), /* VUCOMISS/SD */
[0x2F] = X86_OP_GROUP3(VxCOMISx, None,None, V,x, W,x, vex3 p_00_66), /* VCOMISS/SD */

[0x38] = X86_OP_GROUP0(0F38),
[0x3a] = X86_OP_GROUP0(0F3A),
2 changes: 1 addition & 1 deletion target/i386/tcg/emit.c.inc
@@ -2285,7 +2285,7 @@ static void gen_VZEROALL(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco
{
TCGv_ptr ptr = tcg_temp_new_ptr();

tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_t0));
tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_regs));
gen_helper_memset(ptr, ptr, tcg_constant_i32(0),
tcg_constant_ptr(CPU_NB_REGS * sizeof(ZMMReg)));
}
2 changes: 1 addition & 1 deletion target/ppc/translate/vmx-impl.c.inc
@@ -2058,7 +2058,7 @@ static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
{
const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
mask = dup_const(vece, 1 << (elem_width - 1));
mask = dup_const(vece, 1ULL << (elem_width - 1));
uint64_t i, j;
TCGv_i64 lo, hi, t0, t1;

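The ULL suffix fixes a shift performed in 32-bit int: for vece = MO_64 the element width is 64, and shifting the int constant 1 by 63 is undefined behaviour (dup_const() is an identity at that element size, so nothing masks the mistake). Minimal illustration:

    unsigned vece = 3;                   /* MO_64 */
    uint64_t elem_width = 8 << vece;     /* 64 */
    /* 1    << (elem_width - 1): int shifted by 63, undefined behaviour */
    /* 1ULL << (elem_width - 1): 0x8000000000000000, the sign-bit mask  */
    uint64_t mask = 1ULL << (elem_width - 1);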
11 changes: 9 additions & 2 deletions target/riscv/csr.c
@@ -3797,6 +3797,11 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
return RISCV_EXCP_ILLEGAL_INST;
}

/* ensure CSR is implemented by checking predicate */
if (!csr_ops[csrno].predicate) {
return RISCV_EXCP_ILLEGAL_INST;
}

/* privileged spec version check */
if (env->priv_ver < csr_min_priv) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -3814,7 +3819,6 @@
* illegal instruction exception should be triggered instead of virtual
* instruction exception. Hence this comes after the read / write check.
*/
g_assert(csr_ops[csrno].predicate != NULL);
RISCVException ret = csr_ops[csrno].predicate(env, csrno);
if (ret != RISCV_EXCP_NONE) {
return ret;
@@ -3991,7 +3995,10 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
return ret;
}

/* Control and Status Register function table */
/*
* Control and Status Register function table
* riscv_csr_operations::predicate() must be provided for an implemented CSR
*/
riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
/* User Floating-Point CSRs */
[CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
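Background for the relocated predicate check: csr_ops is filled with designated initializers, so any CSR number that no entry names is a zero-filled hole, and a guest csrrw to such a number used to reach the later g_assert() and abort QEMU. Sketch of the sparse-table effect (entry copied from above, index invented):

    riscv_csr_operations table[CSR_TABLE_SIZE] = {
        [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
    };
    /* any unlisted index is all zeroes: table[n].predicate == NULL,
     * which must now surface as RISCV_EXCP_ILLEGAL_INST in the guest
     * rather than as a QEMU abort */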
6 changes: 6 additions & 0 deletions target/riscv/insn_trans/trans_privileged.c.inc
@@ -77,6 +77,9 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
#ifndef CONFIG_USER_ONLY
if (has_ext(ctx, RVS)) {
decode_save_opc(ctx);
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_sret(cpu_pc, cpu_env);
exit_tb(ctx); /* no chaining */
ctx->base.is_jmp = DISAS_NORETURN;
@@ -93,6 +96,9 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
{
#ifndef CONFIG_USER_ONLY
decode_save_opc(ctx);
if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_mret(cpu_pc, cpu_env);
exit_tb(ctx); /* no chaining */
ctx->base.is_jmp = DISAS_NORETURN;
2 changes: 1 addition & 1 deletion target/s390x/tcg/insn-data.h.inc
@@ -606,7 +606,7 @@
F(0xed04, LDEB, RXE, Z, 0, m2_32u, new, f1, ldeb, 0, IF_BFP)
F(0xed05, LXDB, RXE, Z, 0, m2_64, new_x, x1, lxdb, 0, IF_BFP)
F(0xed06, LXEB, RXE, Z, 0, m2_32u, new_x, x1, lxeb, 0, IF_BFP)
F(0xb324, LDER, RXE, Z, 0, e2, new, f1, lde, 0, IF_AFP1)
F(0xb324, LDER, RRE, Z, 0, e2, new, f1, lde, 0, IF_AFP1)
F(0xed24, LDE, RXE, Z, 0, m2_32u, new, f1, lde, 0, IF_AFP1)
/* LOAD ROUNDED */
F(0xb344, LEDBR, RRF_e, Z, 0, f2, new, e1, ledb, 0, IF_BFP)
81 changes: 58 additions & 23 deletions target/s390x/tcg/translate.c
@@ -1534,18 +1534,51 @@ static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
}
}

/*
* Disassemble the target of a branch. The results are returned in a form
* suitable for passing into help_branch():
*
* - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
* branches, whose DisasContext *S contains the relative immediate field RI,
* are considered fixed. All the other branches are considered computed.
* - int IMM is the value of RI.
* - TCGv_i64 CDEST is the address of the computed target.
*/
#define disas_jdest(s, ri, is_imm, imm, cdest) do { \
if (have_field(s, ri)) { \
if (unlikely(s->ex_value)) { \
cdest = tcg_temp_new_i64(); \
tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\
tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2); \
is_imm = false; \
} else { \
is_imm = true; \
} \
} else { \
is_imm = false; \
} \
imm = is_imm ? get_field(s, ri) : 0; \
} while (false)
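A sketch of the macro's three outcomes (illustrative, values invented): a branch with a relative field stays fixed only when not running under EXECUTE, because under EXECUTE the offset is relative to the EXECUTE target rather than to pc_next:

    /* have_field(s, ri) && !s->ex_value: is_imm = true,  imm = RI
     * have_field(s, ri) &&  s->ex_value: is_imm = false, imm = 0,
     *                                    cdest = ex_target + RI * 2
     * no RI field (register target):     is_imm = false, imm = 0   */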

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
DisasCompare c;
bool is_imm;
int imm;

pc_to_link_info(o->out, s, s->pc_tmp);
return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2);

disas_jdest(s, i2, is_imm, imm, o->in2);
disas_jcc(s, &c, 0xf);
return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
int m1 = get_field(s, m1);
bool is_imm = have_field(s, i2);
int imm = is_imm ? get_field(s, i2) : 0;
DisasCompare c;
bool is_imm;
int imm;

/* BCR with R2 = 0 causes no branching */
if (have_field(s, r2) && get_field(s, r2) == 0) {
@@ -1562,17 +1595,18 @@ static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
return DISAS_NEXT;
}

disas_jdest(s, i2, is_imm, imm, o->in2);
disas_jcc(s, &c, m1);
return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
int r1 = get_field(s, r1);
bool is_imm = have_field(s, i2);
int imm = is_imm ? get_field(s, i2) : 0;
DisasCompare c;
bool is_imm;
TCGv_i64 t;
int imm;

c.cond = TCG_COND_NE;
c.is_64 = false;
@@ -1584,6 +1618,7 @@
c.u.s32.b = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(c.u.s32.a, t);

disas_jdest(s, i2, is_imm, imm, o->in2);
return help_branch(s, &c, is_imm, imm, o->in2);
}

@@ -1611,9 +1646,9 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
int r1 = get_field(s, r1);
bool is_imm = have_field(s, i2);
int imm = is_imm ? get_field(s, i2) : 0;
DisasCompare c;
bool is_imm;
int imm;

c.cond = TCG_COND_NE;
c.is_64 = true;
@@ -1622,17 +1657,18 @@
c.u.s64.a = regs[r1];
c.u.s64.b = tcg_constant_i64(0);

disas_jdest(s, i2, is_imm, imm, o->in2);
return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
int r1 = get_field(s, r1);
int r3 = get_field(s, r3);
bool is_imm = have_field(s, i2);
int imm = is_imm ? get_field(s, i2) : 0;
DisasCompare c;
bool is_imm;
TCGv_i64 t;
int imm;

c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
c.is_64 = false;
@@ -1645,16 +1681,17 @@
tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
store_reg32_i64(r1, t);

disas_jdest(s, i2, is_imm, imm, o->in2);
return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
int r1 = get_field(s, r1);
int r3 = get_field(s, r3);
bool is_imm = have_field(s, i2);
int imm = is_imm ? get_field(s, i2) : 0;
DisasCompare c;
bool is_imm;
int imm;

c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
c.is_64 = true;
Expand All @@ -1668,6 +1705,7 @@ static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
c.u.s64.a = regs[r1];

disas_jdest(s, i2, is_imm, imm, o->in2);
return help_branch(s, &c, is_imm, imm, o->in2);
}

@@ -1685,10 +1723,9 @@ static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
c.u.s64.a = o->in1;
c.u.s64.b = o->in2;

is_imm = have_field(s, i4);
if (is_imm) {
imm = get_field(s, i4);
} else {
o->out = NULL;
disas_jdest(s, i4, is_imm, imm, o->out);
if (!is_imm && !o->out) {
imm = 0;
o->out = get_address(s, 0, get_field(s, b4),
get_field(s, d4));
@@ -5774,15 +5811,13 @@ static void in2_a2(DisasContext *s, DisasOps *o)

static TCGv gen_ri2(DisasContext *s)
{
int64_t delta = (int64_t)get_field(s, i2) * 2;
TCGv ri2;
TCGv ri2 = NULL;
bool is_imm;
int imm;

if (unlikely(s->ex_value)) {
ri2 = tcg_temp_new_i64();
tcg_gen_ld_i64(ri2, cpu_env, offsetof(CPUS390XState, ex_target));
tcg_gen_addi_i64(ri2, ri2, delta);
} else {
ri2 = tcg_constant_i64(s->base.pc_next + delta);
disas_jdest(s, i2, is_imm, imm, ri2);
if (is_imm) {
ri2 = tcg_constant_i64(s->base.pc_next + imm * 2);
}

return ri2;
2 changes: 1 addition & 1 deletion tcg/i386/tcg-target.c.inc
@@ -1083,7 +1083,7 @@ static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
{
/* This function is only used for passing structs by reference. */
tcg_debug_assert(imm == (int32_t)imm);
tcg_out_modrm_offset(s, OPC_LEA, rd, rs, imm);
tcg_out_modrm_offset(s, OPC_LEA | P_REXW, rd, rs, imm);
}

static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
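Without P_REXW the LEA is emitted with 32-bit operand size, so on an x86-64 host the computed address is truncated to 32 bits and zero-extended into rd; the tcg_debug_assert() only bounds the displacement, not the base-plus-displacement sum. Illustrative encodings, assuming rd = r12 and rs = rbp:

    /* without REX.W: lea 0x10(%rbp), %r12d -- result truncated to 32 bits */
    /* with REX.W:    lea 0x10(%rbp), %r12  -- full 64-bit rbp + 0x10      */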
2 changes: 1 addition & 1 deletion tests/docker/dockerfiles/debian-xtensa-cross.docker
@@ -5,7 +5,7 @@
# using a prebuilt toolchains for Xtensa cores from:
# https://github.com/foss-xtensa/toolchain/releases
#
FROM docker.io/library/debian:stretch-slim
FROM docker.io/library/debian:11-slim

RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \
33 changes: 33 additions & 0 deletions tests/qtest/fuzz-lsi53c895a-test.c
@@ -8,6 +8,36 @@
#include "qemu/osdep.h"
#include "libqtest.h"

/*
* This used to trigger a DMA reentrancy issue
* leading to memory corruption bugs like stack
* overflow or use-after-free
* https://gitlab.com/qemu-project/qemu/-/issues/1563
*/
static void test_lsi_dma_reentrancy(void)
{
QTestState *s;

s = qtest_init("-M q35 -m 512M -nodefaults "
"-blockdev driver=null-co,node-name=null0 "
"-device lsi53c810 -device scsi-cd,drive=null0");

qtest_outl(s, 0xcf8, 0x80000804); /* PCI Command Register */
qtest_outw(s, 0xcfc, 0x7); /* Enables accesses */
qtest_outl(s, 0xcf8, 0x80000814); /* Memory Bar 1 */
qtest_outl(s, 0xcfc, 0xff100000); /* Set MMIO Address */
qtest_outl(s, 0xcf8, 0x80000818); /* Memory Bar 2 */
qtest_outl(s, 0xcfc, 0xff000000); /* Set RAM Address */
qtest_writel(s, 0xff000000, 0xc0000024);
qtest_writel(s, 0xff000114, 0x00000080);
qtest_writel(s, 0xff00012c, 0xff000000);
qtest_writel(s, 0xff000004, 0xff000114);
qtest_writel(s, 0xff000008, 0xff100014);
qtest_writel(s, 0xff10002f, 0x000000ff);

qtest_quit(s);
}

/*
* This used to trigger a UAF in lsi_do_msgout()
* https://gitlab.com/qemu-project/qemu/-/issues/972
@@ -124,5 +154,8 @@ int main(int argc, char **argv)
qtest_add_func("fuzz/lsi53c895a/lsi_do_msgout_cancel_req",
test_lsi_do_msgout_cancel_req);

qtest_add_func("fuzz/lsi53c895a/lsi_dma_reentrancy",
test_lsi_dma_reentrancy);

return g_test_run();
}
5 changes: 4 additions & 1 deletion tests/unit/meson.build
@@ -114,7 +114,10 @@ if have_block
tests += {'test-crypto-xts': [crypto, io]}
endif
if 'CONFIG_POSIX' in config_host
tests += {'test-image-locking': [testblock]}
tests += {
'test-image-locking': [testblock],
'test-nested-aio-poll': [testblock],
}
endif
if config_host_data.get('CONFIG_REPLICATION')
tests += {'test-replication': [testblock]}
130 changes: 130 additions & 0 deletions tests/unit/test-nested-aio-poll.c
@@ -0,0 +1,130 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Test that poll handlers are not re-entrant in nested aio_poll()
*
* Copyright Red Hat
*
* Poll handlers are usually level-triggered. That means they continue firing
* until the condition is reset (e.g. a virtqueue becomes empty). If a poll
* handler calls nested aio_poll() before the condition is reset, then infinite
* recursion occurs.
*
* aio_poll() is supposed to prevent this by disabling poll handlers in nested
* aio_poll() calls. This test case checks that this is indeed what happens.
*/
#include "qemu/osdep.h"
#include "block/aio.h"
#include "qapi/error.h"

typedef struct {
AioContext *ctx;

/* This is the EventNotifier that drives the test */
EventNotifier poll_notifier;

/* This EventNotifier is only used to wake aio_poll() */
EventNotifier dummy_notifier;

bool nested;
} TestData;

static void io_read(EventNotifier *notifier)
{
fprintf(stderr, "%s %p\n", __func__, notifier);
event_notifier_test_and_clear(notifier);
}

static bool io_poll_true(void *opaque)
{
fprintf(stderr, "%s %p\n", __func__, opaque);
return true;
}

static bool io_poll_false(void *opaque)
{
fprintf(stderr, "%s %p\n", __func__, opaque);
return false;
}

static void io_poll_ready(EventNotifier *notifier)
{
TestData *td = container_of(notifier, TestData, poll_notifier);

fprintf(stderr, "> %s\n", __func__);

g_assert(!td->nested);
td->nested = true;

/* Wake the following nested aio_poll() call */
event_notifier_set(&td->dummy_notifier);

/* This nested event loop must not call io_poll()/io_poll_ready() */
g_assert(aio_poll(td->ctx, true));

td->nested = false;

fprintf(stderr, "< %s\n", __func__);
}

/* dummy_notifier never triggers */
static void io_poll_never_ready(EventNotifier *notifier)
{
g_assert_not_reached();
}

static void test(void)
{
TestData td = {
.ctx = aio_context_new(&error_abort),
};

qemu_set_current_aio_context(td.ctx);

/* Enable polling */
aio_context_set_poll_params(td.ctx, 1000000, 2, 2, &error_abort);

/*
* The GSource is unused but this has the side-effect of changing the fdmon
* that AioContext uses.
*/
aio_get_g_source(td.ctx);

/* Make the event notifier active (set) right away */
event_notifier_init(&td.poll_notifier, 1);
aio_set_event_notifier(td.ctx, &td.poll_notifier, false,
io_read, io_poll_true, io_poll_ready);

/* This event notifier will be used later */
event_notifier_init(&td.dummy_notifier, 0);
aio_set_event_notifier(td.ctx, &td.dummy_notifier, false,
io_read, io_poll_false, io_poll_never_ready);

/* Consume aio_notify() */
g_assert(!aio_poll(td.ctx, false));

/*
* Run the io_read() handler. This has the side-effect of activating
* polling in future aio_poll() calls.
*/
g_assert(aio_poll(td.ctx, true));

/* The second time around the io_poll()/io_poll_ready() handler runs */
g_assert(aio_poll(td.ctx, true));

/* Run io_poll()/io_poll_ready() one more time to show it keeps working */
g_assert(aio_poll(td.ctx, true));

aio_set_event_notifier(td.ctx, &td.dummy_notifier, false,
NULL, NULL, NULL);
aio_set_event_notifier(td.ctx, &td.poll_notifier, false, NULL, NULL, NULL);
event_notifier_cleanup(&td.dummy_notifier);
event_notifier_cleanup(&td.poll_notifier);
aio_context_unref(td.ctx);
}

int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
g_test_add_func("/nested-aio-poll", test);
return g_test_run();
}
4 changes: 2 additions & 2 deletions ui/console.c
@@ -311,7 +311,7 @@ static bool png_save(int fd, pixman_image_t *image, Error **errp)
png_struct *png_ptr;
png_info *info_ptr;
g_autoptr(pixman_image_t) linebuf =
qemu_pixman_linebuf_create(PIXMAN_a8r8g8b8, width);
qemu_pixman_linebuf_create(PIXMAN_BE_r8g8b8, width);
uint8_t *buf = (uint8_t *)pixman_image_get_data(linebuf);
FILE *f = fdopen(fd, "wb");
int y;
@@ -341,7 +341,7 @@ static bool png_save(int fd, pixman_image_t *image, Error **errp)
png_init_io(png_ptr, f);

png_set_IHDR(png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE,
PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);

png_write_info(png_ptr, info_ptr);
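The two hunks fix the same mismatch: libpng is handed raw row bytes, so the pixman linebuf format must match the declared PNG color type byte-for-byte. PIXMAN_a8r8g8b8 is a packed 32-bit ARGB word in host endianness, whereas PNG_COLOR_TYPE_RGB_ALPHA expects R,G,B,A byte order; the new pairing produces plain 3-byte RGB rows. Per-pixel memory layout (sketch):

    /* PIXMAN_a8r8g8b8, little-endian host: B, G, R, A  (not RGBA)    */
    /* PIXMAN_BE_r8g8b8:                    R, G, B     (matches RGB) */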
2 changes: 1 addition & 1 deletion ui/vnc.c
@@ -3751,7 +3751,7 @@

addr->type = SOCKET_ADDRESS_TYPE_INET;
inet = &addr->u.inet;
if (addrstr[0] == '[' && addrstr[hostlen - 1] == ']') {
if (hostlen && addrstr[0] == '[' && addrstr[hostlen - 1] == ']') {
inet->host = g_strndup(addrstr + 1, hostlen - 2);
} else {
inet->host = g_strndup(addrstr, hostlen);
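The added hostlen check closes an out-of-bounds read when the host part is empty, e.g. a bare display string such as ":5901". Minimal sketch of the bad index (types abbreviated, assuming hostlen is unsigned):

    const char *addrstr = ":5901";  /* empty host before the ':' */
    size_t hostlen = 0;
    /* old check: addrstr[hostlen - 1] == addrstr[(size_t)-1],
     * a read before the start of the string */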
11 changes: 11 additions & 0 deletions util/aio-posix.c
@@ -353,8 +353,19 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
poll_ready && revents == 0 &&
aio_node_check(ctx, node->is_external) &&
node->io_poll_ready) {
/*
* Remove temporarily to avoid infinite loops when ->io_poll_ready()
* calls aio_poll() before clearing the condition that made the poll
* handler become ready.
*/
QLIST_SAFE_REMOVE(node, node_poll);

node->io_poll_ready(node->opaque);

if (!QLIST_IS_INSERTED(node, node_poll)) {
QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
}

/*
* Return early since revents was zero. aio_notify() does not count as
* progress.
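The recursion this unlink prevents, sketched as a call chain (tests/unit/test-nested-aio-poll.c in this series is the concrete reproducer):

    /* aio_poll()
     *   io_poll() -> true                condition set
     *     io_poll_ready()
     *       aio_poll()                   nested event loop
     *         io_poll() -> true          condition still set
     *           io_poll_ready()          ... unbounded recursion
     *
     * with the node removed from poll_aio_handlers first, the nested
     * aio_poll() skips it; it is re-inserted afterwards unless the
     * handler deleted itself. */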
21 changes: 21 additions & 0 deletions util/async-teardown.c
@@ -12,6 +12,9 @@
*/

#include "qemu/osdep.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/module.h"
#include <dirent.h>
#include <sys/prctl.h>
#include <sched.h>
@@ -144,3 +147,21 @@ void init_async_teardown(void)
clone(async_teardown_fn, new_stack_for_clone(), CLONE_VM, NULL);
sigprocmask(SIG_SETMASK, &old_signals, NULL);
}

static QemuOptsList qemu_run_with_opts = {
.name = "run-with",
.head = QTAILQ_HEAD_INITIALIZER(qemu_run_with_opts.head),
.desc = {
{
.name = "async-teardown",
.type = QEMU_OPT_BOOL,
},
{ /* end of list */ }
},
};

static void register_teardown(void)
{
qemu_add_opts(&qemu_run_with_opts);
}
opts_init(register_teardown);
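The registered option group is what a -run-with command-line group binds to; a usage sketch, assuming the option is wired through to init_async_teardown() by the machine start-up code:

    /* qemu-system-s390x -run-with async-teardown=on ...
     *
     * parses into qemu_run_with_opts; async-teardown=on requests the
     * clone()d teardown process set up above. */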
14 changes: 14 additions & 0 deletions util/async.c
@@ -164,7 +164,21 @@ int aio_bh_poll(AioContext *ctx)

/* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */
QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);

/*
* GCC13 [-Werror=dangling-pointer=] complains that the local variable
* 'slice' is being stored in the global 'ctx->bh_slice_list' but the
* list is emptied before this function returns.
*/
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wdangling-pointer="
#endif
QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
QEMUBH *bh;
8 changes: 5 additions & 3 deletions util/vfio-helpers.c
@@ -106,15 +106,17 @@ struct QEMUVFIOState {
*/
static char *sysfs_find_group_file(const char *device, Error **errp)
{
g_autoptr(GError) gerr = NULL;
char *sysfs_link;
char *sysfs_group;
char *p;
char *path = NULL;

sysfs_link = g_strdup_printf("/sys/bus/pci/devices/%s/iommu_group", device);
sysfs_group = g_malloc0(PATH_MAX);
if (readlink(sysfs_link, sysfs_group, PATH_MAX - 1) == -1) {
error_setg_errno(errp, errno, "Failed to find iommu group sysfs path");
sysfs_group = g_file_read_link(sysfs_link, &gerr);
if (gerr) {
error_setg(errp, "Failed to find iommu group sysfs path: %s",
gerr->message);
goto out;
}
p = strrchr(sysfs_group, '/');
Expand Down