360 changes: 180 additions & 180 deletions hw/ide/microdrive.c

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion hw/loongarch/virt.c
Expand Up @@ -399,7 +399,7 @@ static struct _loaderparams {

static uint64_t cpu_loongarch_virt_to_phys(void *opaque, uint64_t addr)
{
return addr & 0x1fffffffll;
return addr & MAKE_64BIT_MASK(0, TARGET_PHYS_ADDR_SPACE_BITS);
}

static int64_t load_kernel_info(void)
Expand Down
9 changes: 8 additions & 1 deletion hw/m68k/virt.c
Expand Up @@ -347,10 +347,17 @@ type_init(virt_machine_register_types)
} \
type_init(machvirt_machine_##major##_##minor##_init);

static void virt_machine_8_1_options(MachineClass *mc)
{
}
DEFINE_VIRT_MACHINE(8, 1, true)

static void virt_machine_8_0_options(MachineClass *mc)
{
virt_machine_8_1_options(mc);
compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len);
}
DEFINE_VIRT_MACHINE(8, 0, true)
DEFINE_VIRT_MACHINE(8, 0, false)

static void virt_machine_7_2_options(MachineClass *mc)
{
Expand Down
4 changes: 2 additions & 2 deletions hw/misc/lasi.c
Expand Up @@ -194,7 +194,7 @@ static const MemoryRegionOps lasi_chip_ops = {

static const VMStateDescription vmstate_lasi = {
.name = "Lasi",
.version_id = 1,
.version_id = 2,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT32(irr, LasiState),
Expand All @@ -204,6 +204,7 @@ static const VMStateDescription vmstate_lasi = {
VMSTATE_UINT32(iar, LasiState),
VMSTATE_UINT32(errlog, LasiState),
VMSTATE_UINT32(amr, LasiState),
VMSTATE_UINT32_V(rtc_ref, LasiState, 2),
VMSTATE_END_OF_LIST()
}
};
Expand Down Expand Up @@ -233,7 +234,6 @@ static void lasi_reset(DeviceState *dev)
s->iar = 0xFFFB0000 + 3; /* CPU_HPA + 3 */

/* Real time clock (RTC), it's only one 32-bit counter @9000 */
s->rtc = time(NULL);
s->rtc_ref = 0;
}

Expand Down
6 changes: 3 additions & 3 deletions hw/net/e1000e_core.c
Expand Up @@ -765,7 +765,7 @@ e1000e_process_tx_desc(E1000ECore *core,
}

tx->skip_cp = false;
net_tx_pkt_reset(tx->tx_pkt);
net_tx_pkt_reset(tx->tx_pkt, core->owner);

tx->sum_needed = 0;
tx->cptse = 0;
Expand Down Expand Up @@ -3447,7 +3447,7 @@ e1000e_core_pci_uninit(E1000ECore *core)
qemu_del_vm_change_state_handler(core->vmstate);

for (i = 0; i < E1000E_NUM_QUEUES; i++) {
net_tx_pkt_reset(core->tx[i].tx_pkt);
net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
net_tx_pkt_uninit(core->tx[i].tx_pkt);
}

Expand Down Expand Up @@ -3572,7 +3572,7 @@ static void e1000e_reset(E1000ECore *core, bool sw)
e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);

for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
net_tx_pkt_reset(core->tx[i].tx_pkt);
net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
core->tx[i].skip_cp = false;
}
Expand Down
4 changes: 4 additions & 0 deletions hw/net/e1000x_regs.h
Expand Up @@ -335,6 +335,7 @@
#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
#define E1000_ICR_RXO 0x00000040 /* rx overrun */
#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
#define E1000_ICR_RXDW 0x00000080 /* rx desc written back */
#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
Expand Down Expand Up @@ -378,6 +379,7 @@
#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
#define E1000_ICS_RXDW E1000_ICR_RXDW /* rx desc written back */
#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
Expand Down Expand Up @@ -407,6 +409,7 @@
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
#define E1000_IMS_RXDW E1000_ICR_RXDW /* rx desc written back */
#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
Expand Down Expand Up @@ -441,6 +444,7 @@
#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */
#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */
#define E1000_IMC_RXDW E1000_ICR_RXDW /* rx desc written back */
#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */
#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
Expand Down
26 changes: 19 additions & 7 deletions hw/net/igb.c
Expand Up @@ -502,16 +502,28 @@ static int igb_post_load(void *opaque, int version_id)
return igb_core_post_load(&s->core);
}

static const VMStateDescription igb_vmstate_tx = {
.name = "igb-tx",
static const VMStateDescription igb_vmstate_tx_ctx = {
.name = "igb-tx-ctx",
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT16(vlan, struct igb_tx),
VMSTATE_UINT16(mss, struct igb_tx),
VMSTATE_BOOL(tse, struct igb_tx),
VMSTATE_BOOL(ixsm, struct igb_tx),
VMSTATE_BOOL(txsm, struct igb_tx),
VMSTATE_UINT32(vlan_macip_lens, struct e1000_adv_tx_context_desc),
VMSTATE_UINT32(seqnum_seed, struct e1000_adv_tx_context_desc),
VMSTATE_UINT32(type_tucmd_mlhl, struct e1000_adv_tx_context_desc),
VMSTATE_UINT32(mss_l4len_idx, struct e1000_adv_tx_context_desc),
VMSTATE_END_OF_LIST()
}
};

static const VMStateDescription igb_vmstate_tx = {
.name = "igb-tx",
.version_id = 2,
.minimum_version_id = 2,
.fields = (VMStateField[]) {
VMSTATE_STRUCT_ARRAY(ctx, struct igb_tx, 2, 0, igb_vmstate_tx_ctx,
struct e1000_adv_tx_context_desc),
VMSTATE_UINT32(first_cmd_type_len, struct igb_tx),
VMSTATE_UINT32(first_olinfo_status, struct igb_tx),
VMSTATE_BOOL(first, struct igb_tx),
VMSTATE_BOOL(skip_cp, struct igb_tx),
VMSTATE_END_OF_LIST()
Expand Down
256 changes: 194 additions & 62 deletions hw/net/igb_core.c

Large diffs are not rendered by default.

9 changes: 4 additions & 5 deletions hw/net/igb_core.h
Expand Up @@ -47,6 +47,7 @@
#define IGB_MSIX_VEC_NUM (10)
#define IGBVF_MSIX_VEC_NUM (3)
#define IGB_NUM_QUEUES (16)
#define IGB_NUM_VM_POOLS (8)

typedef struct IGBCore IGBCore;

Expand All @@ -72,11 +73,9 @@ struct IGBCore {
QEMUTimer *autoneg_timer;

struct igb_tx {
uint16_t vlan; /* VLAN Tag */
uint16_t mss; /* Maximum Segment Size */
bool tse; /* TCP/UDP Segmentation Enable */
bool ixsm; /* Insert IP Checksum */
bool txsm; /* Insert TCP/UDP Checksum */
struct e1000_adv_tx_context_desc ctx[2];
uint32_t first_cmd_type_len;
uint32_t first_olinfo_status;

bool first;
bool skip_cp;
Expand Down
6 changes: 6 additions & 0 deletions hw/net/igb_regs.h
Expand Up @@ -160,6 +160,9 @@ union e1000_adv_rx_desc {
#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000

/* Additional Transmit Descriptor Control definitions */
#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */

/* Additional Receive Descriptor Control definitions */
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */

Expand Down Expand Up @@ -240,6 +243,9 @@ union e1000_adv_rx_desc {

/* from igb/e1000_defines.h */

/* Physical Func Reset Done Indication */
#define E1000_CTRL_EXT_PFRSTD 0x00004000

#define E1000_IVAR_VALID 0x80
#define E1000_GPIE_NSICR 0x00000001
#define E1000_GPIE_MSIX_MODE 0x00000010
Expand Down
27 changes: 23 additions & 4 deletions hw/net/imx_fec.c
Expand Up @@ -282,11 +282,19 @@ static uint32_t imx_phy_read(IMXFECState *s, int reg)
uint32_t val;
uint32_t phy = reg / 32;

if (phy != s->phy_num) {
trace_imx_phy_read_num(phy, s->phy_num);
if (!s->phy_connected) {
return 0xffff;
}

if (phy != s->phy_num) {
if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
s = s->phy_consumer;
} else {
trace_imx_phy_read_num(phy, s->phy_num);
return 0xffff;
}
}

reg %= 32;

switch (reg) {
Expand Down Expand Up @@ -343,11 +351,19 @@ static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
{
uint32_t phy = reg / 32;

if (phy != s->phy_num) {
trace_imx_phy_write_num(phy, s->phy_num);
if (!s->phy_connected) {
return;
}

if (phy != s->phy_num) {
if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
s = s->phy_consumer;
} else {
trace_imx_phy_write_num(phy, s->phy_num);
return;
}
}

reg %= 32;

trace_imx_phy_write(val, phy, reg);
Expand Down Expand Up @@ -1327,6 +1343,9 @@ static Property imx_eth_properties[] = {
DEFINE_NIC_PROPERTIES(IMXFECState, conf),
DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
DEFINE_PROP_BOOL("phy-connected", IMXFECState, phy_connected, true),
DEFINE_PROP_LINK("phy-consumer", IMXFECState, phy_consumer, TYPE_IMX_FEC,
IMXFECState *),
DEFINE_PROP_END_OF_LIST(),
};

Expand Down
30 changes: 17 additions & 13 deletions hw/net/net_tx_pkt.c
Expand Up @@ -43,7 +43,11 @@ struct NetTxPkt {
struct iovec *vec;

uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN];
uint8_t l3_hdr[ETH_MAX_IP_DGRAM_LEN];
union {
struct ip_header ip;
struct ip6_header ip6;
uint8_t octets[ETH_MAX_IP_DGRAM_LEN];
} l3_hdr;

uint32_t payload_len;

Expand Down Expand Up @@ -89,16 +93,14 @@ void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt)
{
uint16_t csum;
assert(pkt);
struct ip_header *ip_hdr;
ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;

ip_hdr->ip_len = cpu_to_be16(pkt->payload_len +
pkt->l3_hdr.ip.ip_len = cpu_to_be16(pkt->payload_len +
pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);

ip_hdr->ip_sum = 0;
csum = net_raw_checksum((uint8_t *)ip_hdr,
pkt->l3_hdr.ip.ip_sum = 0;
csum = net_raw_checksum(pkt->l3_hdr.octets,
pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len);
ip_hdr->ip_sum = cpu_to_be16(csum);
pkt->l3_hdr.ip.ip_sum = cpu_to_be16(csum);
}

void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt)
Expand Down Expand Up @@ -443,7 +445,7 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt)
#endif
}

void net_tx_pkt_reset(struct NetTxPkt *pkt)
void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev)
{
int i;

Expand All @@ -467,6 +469,7 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt)
pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0);
}
}
pkt->pci_dev = pci_dev;
pkt->raw_frags = 0;

pkt->hdr_len = 0;
Expand Down Expand Up @@ -795,19 +798,21 @@ bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
{
assert(pkt);

uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;

/*
* Since underlying infrastructure does not support IP datagrams longer
* than 64K we should drop such packets and don't even try to send
*/
if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) {
if (VIRTIO_NET_HDR_GSO_NONE != gso_type) {
if (pkt->payload_len >
ETH_MAX_IP_DGRAM_LEN -
pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) {
return false;
}
}

if (offload || pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
if (offload || gso_type == VIRTIO_NET_HDR_GSO_NONE) {
if (!offload && pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
net_tx_pkt_do_sw_csum(pkt, &pkt->vec[NET_TX_PKT_L2HDR_FRAG],
pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1,
Expand All @@ -829,15 +834,14 @@ void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt)
{
struct iovec *l2 = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
if (eth_get_l3_proto(l2, 1, l2->iov_len) == ETH_P_IPV6) {
struct ip6_header *ip6 = (struct ip6_header *) pkt->l3_hdr;
/*
* TODO: if qemu would support >64K packets - add jumbo option check
* something like that:
* 'if (ip6->ip6_plen == 0 && !has_jumbo_option(ip6)) {'
*/
if (ip6->ip6_plen == 0) {
if (pkt->l3_hdr.ip6.ip6_plen == 0) {
if (pkt->payload_len <= ETH_MAX_IP_DGRAM_LEN) {
ip6->ip6_plen = htons(pkt->payload_len);
pkt->l3_hdr.ip6.ip6_plen = htons(pkt->payload_len);
}
/*
* TODO: if qemu would support >64K packets
Expand Down
3 changes: 2 additions & 1 deletion hw/net/net_tx_pkt.h
Expand Up @@ -148,9 +148,10 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt);
* reset tx packet private context (needed to be called between packets)
*
* @pkt: packet
* @dev: PCI device processing the next packet
*
*/
void net_tx_pkt_reset(struct NetTxPkt *pkt);
void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *dev);

/**
* Send packet to qemu. handles sw offloads if vhdr is not supported.
Expand Down
2 changes: 2 additions & 0 deletions hw/net/trace-events
Expand Up @@ -280,6 +280,8 @@ igb_core_mdic_read_unhandled(uint32_t addr) "MDIC READ: PHY[%u] UNHANDLED"
igb_core_mdic_write(uint32_t addr, uint32_t data) "MDIC WRITE: PHY[%u] = 0x%x"
igb_core_mdic_write_unhandled(uint32_t addr) "MDIC WRITE: PHY[%u] UNHANDLED"

igb_link_set_ext_params(bool asd_check, bool speed_select_bypass, bool pfrstd) "Set extended link params: ASD check: %d, Speed select bypass: %d, PF reset done: %d"

igb_rx_desc_buff_size(uint32_t b) "buffer size: %u"
igb_rx_desc_buff_write(uint64_t addr, uint16_t offset, const void* source, uint32_t len) "addr: 0x%"PRIx64", offset: %u, from: %p, length: %u"

Expand Down
4 changes: 2 additions & 2 deletions hw/net/vmxnet3.c
Expand Up @@ -678,7 +678,7 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
vmxnet3_complete_packet(s, qidx, txd_idx);
s->tx_sop = true;
s->skip_current_tx_pkt = false;
net_tx_pkt_reset(s->tx_pkt);
net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s));
}
}
}
Expand Down Expand Up @@ -1159,7 +1159,7 @@ static void vmxnet3_deactivate_device(VMXNET3State *s)
{
if (s->device_active) {
VMW_CBPRN("Deactivating vmxnet3...");
net_tx_pkt_reset(s->tx_pkt);
net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s));
net_tx_pkt_uninit(s->tx_pkt);
net_rx_pkt_uninit(s->rx_pkt);
s->device_active = false;
Expand Down
33 changes: 17 additions & 16 deletions hw/nvme/ctrl.c
Expand Up @@ -1434,26 +1434,26 @@ uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len,
}

static inline void nvme_blk_read(BlockBackend *blk, int64_t offset,
BlockCompletionFunc *cb, NvmeRequest *req)
uint32_t align, BlockCompletionFunc *cb,
NvmeRequest *req)
{
assert(req->sg.flags & NVME_SG_ALLOC);

if (req->sg.flags & NVME_SG_DMA) {
req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
cb, req);
req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, align, cb, req);
} else {
req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req);
}
}

static inline void nvme_blk_write(BlockBackend *blk, int64_t offset,
BlockCompletionFunc *cb, NvmeRequest *req)
uint32_t align, BlockCompletionFunc *cb,
NvmeRequest *req)
{
assert(req->sg.flags & NVME_SG_ALLOC);

if (req->sg.flags & NVME_SG_DMA) {
req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
cb, req);
req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, align, cb, req);
} else {
req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req);
}
Expand Down Expand Up @@ -2207,10 +2207,10 @@ static void nvme_rw_cb(void *opaque, int ret)
}

if (req->cmd.opcode == NVME_CMD_READ) {
return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req);
return nvme_blk_read(blk, offset, 1, nvme_rw_complete_cb, req);
}

return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req);
return nvme_blk_write(blk, offset, 1, nvme_rw_complete_cb, req);
}
}

Expand Down Expand Up @@ -2378,7 +2378,7 @@ static void nvme_compare_mdata_cb(void *opaque, int ret)

for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) {
if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) {
req->status = NVME_CMP_FAILURE;
req->status = NVME_CMP_FAILURE | NVME_DNR;
goto out;
}
}
Expand All @@ -2387,7 +2387,7 @@ static void nvme_compare_mdata_cb(void *opaque, int ret)
}

if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) {
req->status = NVME_CMP_FAILURE;
req->status = NVME_CMP_FAILURE | NVME_DNR;
goto out;
}

Expand Down Expand Up @@ -2436,7 +2436,7 @@ static void nvme_compare_data_cb(void *opaque, int ret)
}

if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) {
req->status = NVME_CMP_FAILURE;
req->status = NVME_CMP_FAILURE | NVME_DNR;
goto out;
}

Expand Down Expand Up @@ -2619,6 +2619,9 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr,
req);
if (status) {
g_free(iocb->range);
qemu_aio_unref(iocb);

return status;
}

Expand Down Expand Up @@ -3437,7 +3440,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)

block_acct_start(blk_get_stats(blk), &req->acct, data_size,
BLOCK_ACCT_READ);
nvme_blk_read(blk, data_offset, nvme_rw_cb, req);
nvme_blk_read(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req);
return NVME_NO_COMPLETE;

invalid:
Expand Down Expand Up @@ -3607,7 +3610,7 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,

block_acct_start(blk_get_stats(blk), &req->acct, data_size,
BLOCK_ACCT_WRITE);
nvme_blk_write(blk, data_offset, nvme_rw_cb, req);
nvme_blk_write(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req);
} else {
req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size,
BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
Expand Down Expand Up @@ -7155,9 +7158,7 @@ static int nvme_start_ctrl(NvmeCtrl *n)

if (pci_is_vf(PCI_DEVICE(n)) && !sctrl->scs) {
trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl->nvi),
le16_to_cpu(sctrl->nvq),
sctrl->scs ? "ONLINE" :
"OFFLINE");
le16_to_cpu(sctrl->nvq));
return -1;
}
if (unlikely(n->cq[0])) {
Expand Down
3 changes: 2 additions & 1 deletion hw/nvme/ns.c
Expand Up @@ -399,7 +399,8 @@ static bool nvme_ns_init_fdp(NvmeNamespace *ns, Error **errp)
NvmeEnduranceGroup *endgrp = ns->endgrp;
NvmeRuHandle *ruh;
uint8_t lbafi = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
unsigned int *ruhid, *ruhids;
g_autofree unsigned int *ruhids = NULL;
unsigned int *ruhid;
char *r, *p, *token;
uint16_t *ph;

Expand Down
2 changes: 1 addition & 1 deletion hw/nvme/trace-events
Expand Up @@ -187,7 +187,7 @@ pci_nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the
pci_nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero"
pci_nvme_err_startfail_zasl_too_small(uint32_t zasl, uint32_t pagesz) "nvme_start_ctrl failed because zone append size limit %"PRIu32" is too small, needs to be >= %"PRIu32""
pci_nvme_err_startfail(void) "setting controller enable bit failed"
pci_nvme_err_startfail_virt_state(uint16_t vq, uint16_t vi, const char *state) "nvme_start_ctrl failed due to ctrl state: vi=%u vq=%u %s"
pci_nvme_err_startfail_virt_state(uint16_t vq, uint16_t vi) "nvme_start_ctrl failed due to ctrl state: vi=%u vq=%u"
pci_nvme_err_invalid_mgmt_action(uint8_t action) "action=0x%"PRIx8""
pci_nvme_err_ignored_mmio_vf_offline(uint64_t addr, unsigned size) "addr 0x%"PRIx64" size %d"

Expand Down
18 changes: 6 additions & 12 deletions hw/pci-host/gt64120.c
Expand Up @@ -321,9 +321,6 @@ static void gt64120_isd_mapping(GT64120State *s)
static void gt64120_update_pci_cfgdata_mapping(GT64120State *s)
{
/* Indexed on MByteSwap bit, see Table 158: PCI_0 Command, Offset: 0xc00 */
static const MemoryRegionOps *pci_host_conf_ops[] = {
&pci_host_conf_be_ops, &pci_host_conf_le_ops
};
static const MemoryRegionOps *pci_host_data_ops[] = {
&pci_host_data_be_ops, &pci_host_data_le_ops
};
Expand All @@ -339,15 +336,6 @@ static void gt64120_update_pci_cfgdata_mapping(GT64120State *s)
* - Table 16: 32-bit PCI Transaction Endianess
* - Table 158: PCI_0 Command, Offset: 0xc00
*/
if (memory_region_is_mapped(&phb->conf_mem)) {
memory_region_del_subregion(&s->ISD_mem, &phb->conf_mem);
object_unparent(OBJECT(&phb->conf_mem));
}
memory_region_init_io(&phb->conf_mem, OBJECT(phb),
pci_host_conf_ops[s->regs[GT_PCI0_CMD] & 1],
s, "pci-conf-idx", 4);
memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGADDR << 2,
&phb->conf_mem, 1);

if (memory_region_is_mapped(&phb->data_mem)) {
memory_region_del_subregion(&s->ISD_mem, &phb->data_mem);
Expand Down Expand Up @@ -1208,6 +1196,12 @@ static void gt64120_realize(DeviceState *dev, Error **errp)
PCI_DEVFN(18, 0), TYPE_PCI_BUS);

pci_create_simple(phb->bus, PCI_DEVFN(0, 0), "gt64120_pci");
memory_region_init_io(&phb->conf_mem, OBJECT(phb),
&pci_host_conf_le_ops,
s, "pci-conf-idx", 4);
memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGADDR << 2,
&phb->conf_mem, 1);


/*
* The whole address space decoded by the GT-64120A doesn't generate
Expand Down
15 changes: 13 additions & 2 deletions hw/ppc/spapr.c
Expand Up @@ -4734,15 +4734,26 @@ static void spapr_machine_latest_class_options(MachineClass *mc)
} \
type_init(spapr_machine_register_##suffix)

/*
* pseries-8.1
*/
static void spapr_machine_8_1_class_options(MachineClass *mc)
{
/* Defaults for the latest behaviour inherited from the base class */
}

DEFINE_SPAPR_MACHINE(8_1, "8.1", true);

/*
* pseries-8.0
*/
static void spapr_machine_8_0_class_options(MachineClass *mc)
{
/* Defaults for the latest behaviour inherited from the base class */
spapr_machine_8_1_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len);
}

DEFINE_SPAPR_MACHINE(8_0, "8.0", true);
DEFINE_SPAPR_MACHINE(8_0, "8.0", false);

/*
* pseries-7.2
Expand Down
29 changes: 29 additions & 0 deletions hw/ppc/spapr_rtas.c
Expand Up @@ -33,6 +33,7 @@
#include "sysemu/cpus.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "sysemu/qtest.h"
#include "kvm_ppc.h"

#include "hw/ppc/spapr.h"
Expand Down Expand Up @@ -548,6 +549,32 @@ uint64_t qtest_rtas_call(char *cmd, uint32_t nargs, uint64_t args,
return H_PARAMETER;
}

static bool spapr_qtest_callback(CharBackend *chr, gchar **words)
{
if (strcmp(words[0], "rtas") == 0) {
uint64_t res, args, ret;
unsigned long nargs, nret;
int rc;

rc = qemu_strtoul(words[2], NULL, 0, &nargs);
g_assert(rc == 0);
rc = qemu_strtou64(words[3], NULL, 0, &args);
g_assert(rc == 0);
rc = qemu_strtoul(words[4], NULL, 0, &nret);
g_assert(rc == 0);
rc = qemu_strtou64(words[5], NULL, 0, &ret);
g_assert(rc == 0);
res = qtest_rtas_call(words[1], nargs, args, nret, ret);

qtest_send_prefix(chr);
qtest_sendf(chr, "OK %"PRIu64"\n", res);

return true;
}

return false;
}

void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn)
{
assert((token >= RTAS_TOKEN_BASE) && (token < RTAS_TOKEN_MAX));
Expand Down Expand Up @@ -630,6 +657,8 @@ static void core_rtas_register_types(void)
rtas_ibm_nmi_register);
spapr_rtas_register(RTAS_IBM_NMI_INTERLOCK, "ibm,nmi-interlock",
rtas_ibm_nmi_interlock);

qtest_set_command_cb(spapr_qtest_callback);
}

type_init(core_rtas_register_types)
14 changes: 13 additions & 1 deletion hw/s390x/s390-virtio-ccw.c
Expand Up @@ -826,14 +826,26 @@ bool css_migration_enabled(void)
} \
type_init(ccw_machine_register_##suffix)

static void ccw_machine_8_1_instance_options(MachineState *machine)
{
}

static void ccw_machine_8_1_class_options(MachineClass *mc)
{
}
DEFINE_CCW_MACHINE(8_1, "8.1", true);

static void ccw_machine_8_0_instance_options(MachineState *machine)
{
ccw_machine_8_1_instance_options(machine);
}

static void ccw_machine_8_0_class_options(MachineClass *mc)
{
ccw_machine_8_1_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len);
}
DEFINE_CCW_MACHINE(8_0, "8.0", true);
DEFINE_CCW_MACHINE(8_0, "8.0", false);

static void ccw_machine_7_2_instance_options(MachineState *machine)
{
Expand Down
1 change: 1 addition & 0 deletions hw/ssi/xilinx_spi.c
Expand Up @@ -156,6 +156,7 @@ static void xlx_spi_do_reset(XilinxSPI *s)
txfifo_reset(s);

s->regs[R_SPISSR] = ~0;
s->regs[R_SPICR] = R_SPICR_MTI;
xlx_spi_update_irq(s);
xlx_spi_update_cs(s);
}
Expand Down
13 changes: 8 additions & 5 deletions hw/timer/exynos4210_mct.c
Expand Up @@ -480,11 +480,14 @@ static int32_t exynos4210_gcomp_find(Exynos4210MCTState *s)
res = min_comp_i;
}

DPRINTF("found comparator %d: comp 0x%llx distance 0x%llx, gfrc 0x%llx\n",
res,
s->g_timer.reg.comp[res],
distance_min,
gfrc);
if (res >= 0) {
DPRINTF("found comparator %d: "
"comp 0x%llx distance 0x%llx, gfrc 0x%llx\n",
res,
s->g_timer.reg.comp[res],
distance_min,
gfrc);
}

return res;
}
Expand Down
2 changes: 1 addition & 1 deletion hw/timer/imx_epit.c
Expand Up @@ -179,7 +179,7 @@ static void imx_epit_update_compare_timer(IMXEPITState *s)
* the compare value. Otherwise it may fire at most once in the
* current round.
*/
bool is_oneshot = (limit >= s->cmp);
is_oneshot = (limit < s->cmp);
if (counter >= s->cmp) {
/* The compare timer fires in the current round. */
counter -= s->cmp;
Expand Down
7 changes: 7 additions & 0 deletions hw/tpm/Kconfig
@@ -1,3 +1,10 @@
config TPM_TIS_I2C
bool
depends on TPM
select TPM_BACKEND
select I2C
select TPM_TIS

config TPM_TIS_ISA
bool
depends on TPM && ISA_BUS
Expand Down
1 change: 1 addition & 0 deletions hw/tpm/meson.build
@@ -1,6 +1,7 @@
softmmu_ss.add(when: 'CONFIG_TPM_TIS', if_true: files('tpm_tis_common.c'))
softmmu_ss.add(when: 'CONFIG_TPM_TIS_ISA', if_true: files('tpm_tis_isa.c'))
softmmu_ss.add(when: 'CONFIG_TPM_TIS_SYSBUS', if_true: files('tpm_tis_sysbus.c'))
softmmu_ss.add(when: 'CONFIG_TPM_TIS_I2C', if_true: files('tpm_tis_i2c.c'))
softmmu_ss.add(when: 'CONFIG_TPM_CRB', if_true: files('tpm_crb.c'))
softmmu_ss.add(when: 'CONFIG_TPM_TIS', if_true: files('tpm_ppi.c'))
softmmu_ss.add(when: 'CONFIG_TPM_CRB', if_true: files('tpm_ppi.c'))
Expand Down
3 changes: 3 additions & 0 deletions hw/tpm/tpm_tis.h
Expand Up @@ -86,5 +86,8 @@ int tpm_tis_pre_save(TPMState *s);
void tpm_tis_reset(TPMState *s);
enum TPMVersion tpm_tis_get_tpm_version(TPMState *s);
void tpm_tis_request_completed(TPMState *s, int ret);
uint32_t tpm_tis_read_data(TPMState *s, hwaddr addr, unsigned size);
void tpm_tis_write_data(TPMState *s, hwaddr addr, uint64_t val, uint32_t size);
uint16_t tpm_tis_get_checksum(TPMState *s);

#endif /* TPM_TPM_TIS_H */
36 changes: 28 additions & 8 deletions hw/tpm/tpm_tis_common.c
Expand Up @@ -26,6 +26,8 @@
#include "hw/irq.h"
#include "hw/isa/isa.h"
#include "qapi/error.h"
#include "qemu/bswap.h"
#include "qemu/crc-ccitt.h"
#include "qemu/module.h"

#include "hw/acpi/tpm.h"
Expand Down Expand Up @@ -447,6 +449,23 @@ static uint64_t tpm_tis_mmio_read(void *opaque, hwaddr addr,
return val;
}

/*
* A wrapper read function so that it can be directly called without
* mmio.
*/
uint32_t tpm_tis_read_data(TPMState *s, hwaddr addr, unsigned size)
{
return tpm_tis_mmio_read(s, addr, size);
}

/*
* Calculate current data buffer checksum
*/
uint16_t tpm_tis_get_checksum(TPMState *s)
{
return bswap16(crc_ccitt(0, s->buffer, s->rw_offset));
}

/*
* Write a value to a register of the TIS interface
* See specs pages 33-63 for description of the registers
Expand Down Expand Up @@ -588,10 +607,6 @@ static void tpm_tis_mmio_write(void *opaque, hwaddr addr,

break;
case TPM_TIS_REG_INT_ENABLE:
if (s->active_locty != locty) {
break;
}

s->loc[locty].inte &= mask;
s->loc[locty].inte |= (val & (TPM_TIS_INT_ENABLED |
TPM_TIS_INT_POLARITY_MASK |
Expand All @@ -601,10 +616,6 @@ static void tpm_tis_mmio_write(void *opaque, hwaddr addr,
/* hard wired -- ignore */
break;
case TPM_TIS_REG_INT_STATUS:
if (s->active_locty != locty) {
break;
}

/* clearing of interrupt flags */
if (((val & TPM_TIS_INTERRUPTS_SUPPORTED)) &&
(s->loc[locty].ints & TPM_TIS_INTERRUPTS_SUPPORTED)) {
Expand Down Expand Up @@ -767,6 +778,15 @@ static void tpm_tis_mmio_write(void *opaque, hwaddr addr,
}
}

/*
* A wrapper write function so that it can be directly called without
* mmio.
*/
void tpm_tis_write_data(TPMState *s, hwaddr addr, uint64_t val, uint32_t size)
{
tpm_tis_mmio_write(s, addr, val, size);
}

const MemoryRegionOps tpm_tis_memory_ops = {
.read = tpm_tis_mmio_read,
.write = tpm_tis_mmio_write,
Expand Down
571 changes: 571 additions & 0 deletions hw/tpm/tpm_tis_i2c.c

Large diffs are not rendered by default.

6 changes: 6 additions & 0 deletions hw/tpm/trace-events
Expand Up @@ -36,3 +36,9 @@ tpm_spapr_do_crq_unknown_msg_type(uint8_t type) "Unknown message type 0x%02x"
tpm_spapr_do_crq_unknown_crq(uint8_t raw1, uint8_t raw2) "unknown CRQ 0x%02x 0x%02x ..."
tpm_spapr_post_load(void) "Delivering TPM response after resume"
tpm_spapr_caught_response(uint32_t v) "Caught response to deliver after resume: %u bytes"

# tpm_tis_i2c.c
tpm_tis_i2c_recv(uint8_t data) "TPM I2C read: 0x%X"
tpm_tis_i2c_send(uint8_t data) "TPM I2C write: 0x%X"
tpm_tis_i2c_event(const char *event) "TPM I2C event: %s"
tpm_tis_i2c_send_reg(const char *name, int reg) "TPM I2C write register: %s(0x%X)"
4 changes: 4 additions & 0 deletions hw/watchdog/Kconfig
Expand Up @@ -20,3 +20,7 @@ config WDT_IMX2

config WDT_SBSA
bool

config ALLWINNER_WDT
bool
select PTIMER
416 changes: 416 additions & 0 deletions hw/watchdog/allwinner-wdt.c

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions hw/watchdog/meson.build
@@ -1,4 +1,5 @@
softmmu_ss.add(files('watchdog.c'))
softmmu_ss.add(when: 'CONFIG_ALLWINNER_WDT', if_true: files('allwinner-wdt.c'))
softmmu_ss.add(when: 'CONFIG_CMSDK_APB_WATCHDOG', if_true: files('cmsdk-apb-watchdog.c'))
softmmu_ss.add(when: 'CONFIG_WDT_IB6300ESB', if_true: files('wdt_i6300esb.c'))
softmmu_ss.add(when: 'CONFIG_WDT_IB700', if_true: files('wdt_ib700.c'))
Expand Down
7 changes: 7 additions & 0 deletions hw/watchdog/trace-events
@@ -1,5 +1,12 @@
# See docs/devel/tracing.rst for syntax documentation.

# allwinner-wdt.c
allwinner_wdt_read(uint64_t offset, uint64_t data, unsigned size) "Allwinner watchdog read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
allwinner_wdt_write(uint64_t offset, uint64_t data, unsigned size) "Allwinner watchdog write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
allwinner_wdt_reset_enter(void) "Allwinner watchdog: reset"
allwinner_wdt_update_timer(uint8_t count) "Allwinner watchdog: count %" PRIu8
allwinner_wdt_expired(bool enabled, bool reset_enabled) "Allwinner watchdog: enabled %u reset_enabled %u"

# cmsdk-apb-watchdog.c
cmsdk_apb_watchdog_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
cmsdk_apb_watchdog_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
Expand Down
2 changes: 2 additions & 0 deletions hw/xenpv/xen_machine_pv.c
Expand Up @@ -35,6 +35,8 @@ static void xen_init_pv(MachineState *machine)
DriveInfo *dinfo;
int i;

setup_xen_backend_ops();

/* Initialize backend core & drivers */
xen_be_init();

Expand Down
3 changes: 1 addition & 2 deletions include/block/block-io.h
Expand Up @@ -79,7 +79,7 @@ bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);

int64_t coroutine_fn GRAPH_RDLOCK bdrv_co_nb_sectors(BlockDriverState *bs);
int64_t co_wrapper_mixed_bdrv_rdlock bdrv_nb_sectors(BlockDriverState *bs);
int64_t coroutine_mixed_fn bdrv_nb_sectors(BlockDriverState *bs);

int64_t coroutine_fn GRAPH_RDLOCK bdrv_co_getlength(BlockDriverState *bs);
int64_t co_wrapper_mixed_bdrv_rdlock bdrv_getlength(BlockDriverState *bs);
Expand All @@ -89,7 +89,6 @@ int64_t co_wrapper bdrv_get_allocated_file_size(BlockDriverState *bs);

BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
BlockDriverState *in_bs, Error **errp);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);

int coroutine_fn GRAPH_RDLOCK
bdrv_co_delete_file(BlockDriverState *bs, Error **errp);
Expand Down
10 changes: 8 additions & 2 deletions include/block/block_int-common.h
Expand Up @@ -158,8 +158,6 @@ struct BlockDriver {
*/
bool supports_backing;

bool has_variable_length;

/*
* Drivers setting this field must be able to work with just a plain
* filename with '<protocol_name>:' as a prefix, and no other options.
Expand Down Expand Up @@ -855,6 +853,14 @@ typedef struct BlockLimits {

/* maximum number of iovec elements */
int max_iov;

/*
* true if the length of the underlying file can change, and QEMU
* is expected to adjust automatically. Mostly for CD-ROM drives,
* whose length is zero when the tray is empty (they don't need
* an explicit monitor command to load the disk inside the guest).
*/
bool has_variable_length;
} BlockLimits;

typedef struct BdrvOpBlocker BdrvOpBlocker;
Expand Down
79 changes: 13 additions & 66 deletions include/exec/cpu-all.h
Expand Up @@ -21,6 +21,7 @@

#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "exec/tswap.h"
#include "qemu/thread.h"
#include "hw/core/cpu.h"
#include "qemu/rcu.h"
Expand All @@ -44,69 +45,6 @@
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
*s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
*s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
*s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
return s;
}

static inline uint32_t tswap32(uint32_t s)
{
return s;
}

static inline uint64_t tswap64(uint64_t s)
{
return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
Expand Down Expand Up @@ -152,6 +90,15 @@ static inline void tswap64s(uint64_t *s)
*/
extern uintptr_t guest_base;
extern bool have_guest_base;

/*
* If non-zero, the guest virtual address space is a contiguous subset
* of the host virtual address space, i.e. '-R reserved_va' is in effect
* either from the command-line or by default. The value is the last
* byte of the guest address space e.g. UINT32_MAX.
*
* If zero, the host and guest virtual address spaces are intermingled.
*/
extern unsigned long reserved_va;

/*
Expand All @@ -171,7 +118,7 @@ extern unsigned long reserved_va;
#define GUEST_ADDR_MAX_ \
((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
UINT32_MAX : ~0ul)
#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)

#else

Expand Down Expand Up @@ -276,8 +223,8 @@ typedef int (*walk_memory_regions_fn)(void *, target_ulong,
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
void page_reset_target_data(target_ulong start, target_ulong end);
void page_set_flags(target_ulong start, target_ulong last, int flags);
void page_reset_target_data(target_ulong start, target_ulong last);
int page_check_range(target_ulong start, target_ulong len, int flags);

/**
Expand Down
2 changes: 1 addition & 1 deletion include/exec/cpu-common.h
Expand Up @@ -165,6 +165,6 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
/* vl.c */
extern int singlestep;

void list_cpus(const char *optarg);
void list_cpus(void);

#endif /* CPU_COMMON_H */
2 changes: 1 addition & 1 deletion include/exec/exec-all.h
Expand Up @@ -678,7 +678,7 @@ void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
Expand Down
72 changes: 72 additions & 0 deletions include/exec/tswap.h
@@ -0,0 +1,72 @@
/*
* Macros for swapping a value if the endianness is different
* between the target and the host.
*
* SPDX-License-Identifier: LGPL-2.1-or-later
*/

#ifndef TSWAP_H
#define TSWAP_H

#include "hw/core/cpu.h"
#include "qemu/bswap.h"

/*
* If we're in target-specific code, we can hard-code the swapping
* condition, otherwise we have to do (slower) run-time checks.
*/
#ifdef NEED_CPU_H
#define target_needs_bswap() (HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN)
#else
#define target_needs_bswap() (target_words_bigendian() != HOST_BIG_ENDIAN)
#endif

/* Swap a 16-bit value when host and target endianness differ. */
static inline uint16_t tswap16(uint16_t s)
{
    return target_needs_bswap() ? bswap16(s) : s;
}

/* Swap a 32-bit value when host and target endianness differ. */
static inline uint32_t tswap32(uint32_t s)
{
    return target_needs_bswap() ? bswap32(s) : s;
}

/* Swap a 64-bit value when host and target endianness differ. */
static inline uint64_t tswap64(uint64_t s)
{
    return target_needs_bswap() ? bswap64(s) : s;
}

/* In-place variant of tswap16(). */
static inline void tswap16s(uint16_t *s)
{
    if (!target_needs_bswap()) {
        return;
    }
    *s = bswap16(*s);
}

/* In-place variant of tswap32(). */
static inline void tswap32s(uint32_t *s)
{
    if (!target_needs_bswap()) {
        return;
    }
    *s = bswap32(*s);
}

/* In-place variant of tswap64(). */
static inline void tswap64s(uint64_t *s)
{
    if (!target_needs_bswap()) {
        return;
    }
    *s = bswap64(*s);
}

#endif /* TSWAP_H */
41 changes: 41 additions & 0 deletions include/hw/acpi/tpm.h
Expand Up @@ -93,6 +93,7 @@
#define TPM_TIS_CAP_DATA_TRANSFER_64B (3 << 9)
#define TPM_TIS_CAP_DATA_TRANSFER_LEGACY (0 << 9)
#define TPM_TIS_CAP_BURST_COUNT_DYNAMIC (0 << 8)
#define TPM_TIS_CAP_BURST_COUNT_STATIC (1 << 8)
#define TPM_TIS_CAP_INTERRUPT_LOW_LEVEL (1 << 4) /* support is mandatory */
#define TPM_TIS_CAPABILITIES_SUPPORTED1_3 \
(TPM_TIS_CAP_INTERRUPT_LOW_LEVEL | \
Expand Down Expand Up @@ -209,6 +210,46 @@ REG32(CRB_DATA_BUFFER, 0x80)
#define TPM_PPI_FUNC_ALLOWED_USR_NOT_REQ (4 << 0)
#define TPM_PPI_FUNC_MASK (7 << 0)

/* TPM TIS I2C registers */
#define TPM_I2C_REG_LOC_SEL 0x00
#define TPM_I2C_REG_ACCESS 0x04
#define TPM_I2C_REG_INT_ENABLE 0x08
#define TPM_I2C_REG_INT_CAPABILITY 0x14
#define TPM_I2C_REG_STS 0x18
#define TPM_I2C_REG_DATA_FIFO 0x24
#define TPM_I2C_REG_INTF_CAPABILITY 0x30
#define TPM_I2C_REG_I2C_DEV_ADDRESS 0x38
#define TPM_I2C_REG_DATA_CSUM_ENABLE 0x40
#define TPM_I2C_REG_DATA_CSUM_GET 0x44
#define TPM_I2C_REG_DID_VID 0x48
#define TPM_I2C_REG_RID 0x4c
#define TPM_I2C_REG_UNKNOWN 0xff

/* I2C specific interface capabilities */
#define TPM_I2C_CAP_INTERFACE_TYPE (0x2 << 0) /* FIFO interface */
#define TPM_I2C_CAP_INTERFACE_VER (0x0 << 4) /* TCG I2C intf 1.0 */
#define TPM_I2C_CAP_TPM2_FAMILY (0x1 << 7) /* TPM 2.0 family. */
#define TPM_I2C_CAP_DEV_ADDR_CHANGE (0x0 << 27) /* No dev addr chng */
#define TPM_I2C_CAP_BURST_COUNT_STATIC (0x1 << 29) /* Burst count static */
#define TPM_I2C_CAP_LOCALITY_CAP (0x1 << 25) /* 0-5 locality */
#define TPM_I2C_CAP_BUS_SPEED (3 << 21) /* std and fast mode */

/*
* TPM_I2C_STS masks for read/writing bits from/to TIS
* TPM_STS mask for read bits 31:26 must be zero
*/
#define TPM_I2C_STS_READ_MASK 0x00ffffdd
#define TPM_I2C_STS_WRITE_MASK 0x03000062

/* Checksum enabled. */
#define TPM_DATA_CSUM_ENABLED 0x1

/*
* TPM_I2C_INT_ENABLE mask. Linux kernel does not support
* interrupts hence setting it to 0.
*/
#define TPM_I2C_INT_ENABLE_MASK 0x0

void tpm_build_ppi_acpi(TPMIf *tpm, Aml *dev);

#endif /* CONFIG_TPM */
Expand Down
2 changes: 2 additions & 0 deletions include/hw/arm/allwinner-a10.h
Expand Up @@ -13,6 +13,7 @@
#include "hw/misc/allwinner-a10-ccm.h"
#include "hw/misc/allwinner-a10-dramc.h"
#include "hw/i2c/allwinner-i2c.h"
#include "hw/watchdog/allwinner-wdt.h"
#include "sysemu/block-backend.h"

#include "target/arm/cpu.h"
Expand Down Expand Up @@ -41,6 +42,7 @@ struct AwA10State {
AwSdHostState mmc0;
AWI2CState i2c0;
AwRtcState rtc;
AwWdtState wdt;
MemoryRegion sram_a;
EHCISysBusState ehci[AW_A10_NUM_USB];
OHCISysBusState ohci[AW_A10_NUM_USB];
Expand Down
5 changes: 4 additions & 1 deletion include/hw/arm/allwinner-h3.h
Expand Up @@ -48,6 +48,7 @@
#include "hw/net/allwinner-sun8i-emac.h"
#include "hw/rtc/allwinner-rtc.h"
#include "hw/i2c/allwinner-i2c.h"
#include "hw/watchdog/allwinner-wdt.h"
#include "target/arm/cpu.h"
#include "sysemu/block-backend.h"

Expand Down Expand Up @@ -96,7 +97,8 @@ enum {
AW_H3_DEV_RTC,
AW_H3_DEV_CPUCFG,
AW_H3_DEV_R_TWI,
AW_H3_DEV_SDRAM
AW_H3_DEV_SDRAM,
AW_H3_DEV_WDT
};

/** Total number of CPU cores in the H3 SoC */
Expand Down Expand Up @@ -141,6 +143,7 @@ struct AwH3State {
AWI2CState r_twi;
AwSun8iEmacState emac;
AwRtcState rtc;
AwWdtState wdt;
GICState gic;
MemoryRegion sram_a1;
MemoryRegion sram_a2;
Expand Down
1 change: 1 addition & 0 deletions include/hw/arm/fsl-imx6ul.h
Expand Up @@ -89,6 +89,7 @@ struct FslIMX6ULState {
MemoryRegion ocram_alias;

uint32_t phy_num[FSL_IMX6UL_NUM_ETHS];
bool phy_connected[FSL_IMX6UL_NUM_ETHS];
};

enum FslIMX6ULMemoryMap {
Expand Down
1 change: 1 addition & 0 deletions include/hw/arm/fsl-imx7.h
Expand Up @@ -82,6 +82,7 @@ struct FslIMX7State {
ChipideaState usb[FSL_IMX7_NUM_USBS];
DesignwarePCIEHost pcie;
uint32_t phy_num[FSL_IMX7_NUM_ETHS];
bool phy_connected[FSL_IMX7_NUM_ETHS];
};

enum FslIMX7MemoryMap {
Expand Down
20 changes: 10 additions & 10 deletions include/hw/block/flash.h
Expand Up @@ -53,22 +53,22 @@ void nand_setio(DeviceState *dev, uint32_t value);
uint32_t nand_getio(DeviceState *dev);
uint32_t nand_getbuswidth(DeviceState *dev);

#define NAND_MFR_TOSHIBA 0x98
#define NAND_MFR_SAMSUNG 0xec
#define NAND_MFR_FUJITSU 0x04
#define NAND_MFR_NATIONAL 0x8f
#define NAND_MFR_RENESAS 0x07
#define NAND_MFR_STMICRO 0x20
#define NAND_MFR_HYNIX 0xad
#define NAND_MFR_MICRON 0x2c
#define NAND_MFR_TOSHIBA 0x98
#define NAND_MFR_SAMSUNG 0xec
#define NAND_MFR_FUJITSU 0x04
#define NAND_MFR_NATIONAL 0x8f
#define NAND_MFR_RENESAS 0x07
#define NAND_MFR_STMICRO 0x20
#define NAND_MFR_HYNIX 0xad
#define NAND_MFR_MICRON 0x2c

/* onenand.c */
void *onenand_raw_otp(DeviceState *onenand_device);

/* ecc.c */
typedef struct {
uint8_t cp; /* Column parity */
uint16_t lp[2]; /* Line parity */
uint8_t cp; /* Column parity */
uint16_t lp[2]; /* Line parity */
uint16_t count;
} ECCState;

Expand Down
3 changes: 3 additions & 0 deletions include/hw/boards.h
Expand Up @@ -381,6 +381,9 @@ struct MachineState {
} \
type_init(machine_initfn##_register_types)

extern GlobalProperty hw_compat_8_0[];
extern const size_t hw_compat_8_0_len;

extern GlobalProperty hw_compat_7_2[];
extern const size_t hw_compat_7_2_len;

Expand Down
39 changes: 1 addition & 38 deletions include/hw/core/cpu.h
Expand Up @@ -949,7 +949,7 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
return false;
}

#if !defined(CONFIG_TCG) || defined(CONFIG_USER_ONLY)
#if defined(CONFIG_USER_ONLY)
static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint)
{
Expand All @@ -970,50 +970,13 @@ static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
/* No-op: watchpoints are not supported in user-only builds. */
static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs atr, int fl, uintptr_t ra)
{
}

static inline int cpu_watchpoint_address_matches(CPUState *cpu,
vaddr addr, vaddr len)
{
return 0;
}
#else
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);

/**
* cpu_check_watchpoint:
* @cpu: cpu context
* @addr: guest virtual address
* @len: access length
* @attrs: memory access attributes
* @flags: watchpoint access type
* @ra: unwind return address
*
* Check for a watchpoint hit in [addr, addr+len) of the type
* specified by @flags. Exit via exception with a hit.
*/
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra);

/**
* cpu_watchpoint_address_matches:
* @cpu: cpu context
* @addr: guest virtual address
* @len: access length
*
* Return the watchpoint flags that apply to [addr, addr+len).
* If no watchpoint is registered for the range, the result is 0.
*/
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
#endif

/**
Expand Down
43 changes: 43 additions & 0 deletions include/hw/core/tcg-cpu-ops.h
Expand Up @@ -175,4 +175,47 @@ struct TCGCPUOps {

};

#if defined(CONFIG_USER_ONLY)

/* User-only stub: no watchpoints exist, so the check is a no-op. */
static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                                        MemTxAttrs atr, int fl, uintptr_t ra)
{
}

/*
 * User-only stub: no watchpoint can be registered, so no flags
 * ever apply to [addr, addr+len) — always report 0.
 */
static inline int cpu_watchpoint_address_matches(CPUState *cpu,
                                                 vaddr addr, vaddr len)
{
    return 0;
}

#else

/**
* cpu_check_watchpoint:
* @cpu: cpu context
* @addr: guest virtual address
* @len: access length
* @attrs: memory access attributes
* @flags: watchpoint access type
* @ra: unwind return address
*
* Check for a watchpoint hit in [addr, addr+len) of the type
* specified by @flags. Exit via exception with a hit.
*/
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
MemTxAttrs attrs, int flags, uintptr_t ra);

/**
* cpu_watchpoint_address_matches:
* @cpu: cpu context
* @addr: guest virtual address
* @len: access length
*
* Return the watchpoint flags that apply to [addr, addr+len).
* If no watchpoint is registered for the range, the result is 0.
*/
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);

#endif

#endif /* TCG_CPU_OPS_H */
7 changes: 7 additions & 0 deletions include/hw/i2c/aspeed_i2c.h
Expand Up @@ -38,6 +38,13 @@ OBJECT_DECLARE_TYPE(AspeedI2CState, AspeedI2CClass, ASPEED_I2C)
#define ASPEED_I2C_OLD_NUM_REG 11
#define ASPEED_I2C_NEW_NUM_REG 22

#define A_I2CD_M_STOP_CMD BIT(5)
#define A_I2CD_M_RX_CMD BIT(3)
#define A_I2CD_M_TX_CMD BIT(1)
#define A_I2CD_M_START_CMD BIT(0)

#define A_I2CD_MASTER_EN BIT(0)

/* Tx State Machine */
#define I2CD_TX_STATE_MASK 0xf
#define I2CD_IDLE 0x0
Expand Down
3 changes: 3 additions & 0 deletions include/hw/i386/pc.h
Expand Up @@ -195,6 +195,9 @@ void pc_madt_cpu_entry(int uid, const CPUArchIdList *apic_ids,
/* sgx.c */
void pc_machine_init_sgx_epc(PCMachineState *pcms);

extern GlobalProperty pc_compat_8_0[];
extern const size_t pc_compat_8_0_len;

extern GlobalProperty pc_compat_7_2[];
extern const size_t pc_compat_7_2_len;

Expand Down
248 changes: 124 additions & 124 deletions include/hw/ide/internal.h

Large diffs are not rendered by default.

3 changes: 1 addition & 2 deletions include/hw/misc/lasi.h
Expand Up @@ -69,8 +69,7 @@ struct LasiState {

uint32_t errlog;
uint32_t amr;
uint32_t rtc;
time_t rtc_ref;
uint32_t rtc_ref;

MemoryRegion this_mem;
};
Expand Down
2 changes: 2 additions & 0 deletions include/hw/net/imx_fec.h
Expand Up @@ -270,6 +270,8 @@ struct IMXFECState {
uint32_t phy_int;
uint32_t phy_int_mask;
uint32_t phy_num;
bool phy_connected;
struct IMXFECState *phy_consumer;

bool is_fec;

Expand Down
123 changes: 123 additions & 0 deletions include/hw/watchdog/allwinner-wdt.h
@@ -0,0 +1,123 @@
/*
* Allwinner Watchdog emulation
*
* Copyright (C) 2023 Strahinja Jankovic <strahinja.p.jankovic@gmail.com>
*
* This file is derived from Allwinner RTC,
* by Niek Linnenbank.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef HW_WATCHDOG_ALLWINNER_WDT_H
#define HW_WATCHDOG_ALLWINNER_WDT_H

#include "qom/object.h"
#include "hw/ptimer.h"
#include "hw/sysbus.h"

/*
* This is a model of the Allwinner watchdog.
* Since watchdog registers belong to the timer module (and are shared with the
* RTC module), the interrupt line from watchdog is not handled right now.
* In QEMU, we just wire up the watchdog reset to watchdog_perform_action(),
* at least for the moment.
*/

#define TYPE_AW_WDT "allwinner-wdt"

/** Allwinner WDT sun4i family (A10, A12), also sun7i (A20) */
#define TYPE_AW_WDT_SUN4I TYPE_AW_WDT "-sun4i"

/** Allwinner WDT sun6i family and newer (A31, H2+, H3, etc) */
#define TYPE_AW_WDT_SUN6I TYPE_AW_WDT "-sun6i"

/** Number of WDT registers */
#define AW_WDT_REGS_NUM (5)

OBJECT_DECLARE_TYPE(AwWdtState, AwWdtClass, AW_WDT)

/**
* Allwinner WDT object instance state.
*/
struct AwWdtState {
    /*< private >*/
    SysBusDevice parent_obj;

    /*< public >*/
    MemoryRegion iomem;           /* MMIO window for the watchdog registers */
    struct ptimer_state *timer;   /* countdown timer for the watchdog period */

    uint32_t regs[AW_WDT_REGS_NUM];   /* raw register file; meaning of each
                                       * slot is defined by the per-SoC regmap
                                       * in AwWdtClass */
};

/**
* Allwinner WDT class-level struct.
*
* This struct is filled by each sunxi device specific code
* such that the generic code can use this struct to support
* all devices.
*/
struct AwWdtClass {
    /*< private >*/
    SysBusDeviceClass parent_class;
    /*< public >*/

    /** Device-specific register map (differs between sun4i and sun6i) */
    const uint8_t *regmap;

    /** Size of the regmap in bytes */
    size_t regmap_size;

    /**
     * Read device specific register
     *
     * @offset: register offset to read
     * @return true if register read successful, false otherwise
     */
    bool (*read)(AwWdtState *s, uint32_t offset);

    /**
     * Write device specific register
     *
     * @offset: register offset to write
     * @data: value to set in register
     * @return true if register write successful, false otherwise
     */
    bool (*write)(AwWdtState *s, uint32_t offset, uint32_t data);

    /**
     * Check if watchdog can generate system reset
     *
     * @return true if watchdog can generate system reset
     */
    bool (*can_reset_system)(AwWdtState *s);

    /**
     * Check if provided key is valid
     *
     * @val: value written to register
     * @return true if key is valid, false otherwise
     */
    bool (*is_key_valid)(AwWdtState *s, uint32_t val);

    /**
     * Get current INTV_VALUE setting
     *
     * @return current INTV_VALUE (0-15)
     */
    uint8_t (*get_intv_value)(AwWdtState *s);
};

#endif /* HW_WATCHDOG_ALLWINNER_WDT_H */
78 changes: 39 additions & 39 deletions include/io/channel.h
Expand Up @@ -301,10 +301,10 @@ ssize_t qio_channel_writev_full(QIOChannel *ioc,
* Returns: 1 if all bytes were read, 0 if end-of-file
* occurs without data, or -1 on error
*/
int qio_channel_readv_all_eof(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
Error **errp);
int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
Error **errp);

/**
* qio_channel_readv_all:
Expand All @@ -328,10 +328,10 @@ int qio_channel_readv_all_eof(QIOChannel *ioc,
*
* Returns: 0 if all bytes were read, or -1 on error
*/
int qio_channel_readv_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
Error **errp);
int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
Error **errp);


/**
Expand All @@ -353,10 +353,10 @@ int qio_channel_readv_all(QIOChannel *ioc,
*
* Returns: 0 if all bytes were written, or -1 on error
*/
int qio_channel_writev_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
Error **errp);
int coroutine_mixed_fn qio_channel_writev_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
Error **errp);

/**
* qio_channel_readv:
Expand Down Expand Up @@ -437,10 +437,10 @@ ssize_t qio_channel_write(QIOChannel *ioc,
* Returns: 1 if all bytes were read, 0 if end-of-file occurs
* without data, or -1 on error
*/
int qio_channel_read_all_eof(QIOChannel *ioc,
char *buf,
size_t buflen,
Error **errp);
int coroutine_mixed_fn qio_channel_read_all_eof(QIOChannel *ioc,
char *buf,
size_t buflen,
Error **errp);

/**
* qio_channel_read_all:
Expand All @@ -457,10 +457,10 @@ int qio_channel_read_all_eof(QIOChannel *ioc,
*
* Returns: 0 if all bytes were read, or -1 on error
*/
int qio_channel_read_all(QIOChannel *ioc,
char *buf,
size_t buflen,
Error **errp);
int coroutine_mixed_fn qio_channel_read_all(QIOChannel *ioc,
char *buf,
size_t buflen,
Error **errp);

/**
* qio_channel_write_all:
Expand All @@ -476,10 +476,10 @@ int qio_channel_read_all(QIOChannel *ioc,
*
* Returns: 0 if all bytes were written, or -1 on error
*/
int qio_channel_write_all(QIOChannel *ioc,
const char *buf,
size_t buflen,
Error **errp);
int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc,
const char *buf,
size_t buflen,
Error **errp);

/**
* qio_channel_set_blocking:
Expand Down Expand Up @@ -812,11 +812,11 @@ void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
* occurs without data, or -1 on error
*/

int qio_channel_readv_full_all_eof(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds, size_t *nfds,
Error **errp);
int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds, size_t *nfds,
Error **errp);

/**
* qio_channel_readv_full_all:
Expand All @@ -838,11 +838,11 @@ int qio_channel_readv_full_all_eof(QIOChannel *ioc,
* Returns: 0 if all bytes were read, or -1 on error
*/

int qio_channel_readv_full_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds, size_t *nfds,
Error **errp);
int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds, size_t *nfds,
Error **errp);

/**
* qio_channel_writev_full_all:
Expand Down Expand Up @@ -872,11 +872,11 @@ int qio_channel_readv_full_all(QIOChannel *ioc,
* Returns: 0 if all bytes were written, or -1 on error
*/

int qio_channel_writev_full_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int *fds, size_t nfds,
int flags, Error **errp);
int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int *fds, size_t nfds,
int flags, Error **errp);

/**
* qio_channel_flush:
Expand Down
4 changes: 2 additions & 2 deletions include/migration/qemu-file-types.h
Expand Up @@ -35,7 +35,7 @@ void qemu_put_byte(QEMUFile *f, int v);
void qemu_put_be16(QEMUFile *f, unsigned int v);
void qemu_put_be32(QEMUFile *f, unsigned int v);
void qemu_put_be64(QEMUFile *f, uint64_t v);
size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size);
size_t coroutine_mixed_fn qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size);

int qemu_get_byte(QEMUFile *f);

Expand Down Expand Up @@ -161,7 +161,7 @@ static inline void qemu_get_sbe64s(QEMUFile *f, int64_t *pv)
qemu_get_be64s(f, (uint64_t *)pv);
}

size_t qemu_get_counted_string(QEMUFile *f, char buf[256]);
size_t coroutine_mixed_fn qemu_get_counted_string(QEMUFile *f, char buf[256]);

void qemu_put_counted_string(QEMUFile *f, const char *name);

Expand Down
4 changes: 2 additions & 2 deletions include/qapi/qmp/dispatch.h
Expand Up @@ -55,8 +55,8 @@ bool qmp_command_available(const QmpCommand *cmd, Error **errp);
const char *qmp_command_name(const QmpCommand *cmd);
bool qmp_has_success_response(const QmpCommand *cmd);
QDict *qmp_error_response(Error *err);
QDict *qmp_dispatch(const QmpCommandList *cmds, QObject *request,
bool allow_oob, Monitor *cur_mon);
QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *request,
bool allow_oob, Monitor *cur_mon);
bool qmp_is_oob(const QDict *dict);

typedef void (*qmp_cmd_callback_fn)(const QmpCommand *cmd, void *opaque);
Expand Down
201 changes: 201 additions & 0 deletions include/qemu/qtree.h
@@ -0,0 +1,201 @@
/*
* GLIB - Library of useful routines for C programming
* Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
*
* SPDX-License-Identifier: LGPL-2.1-or-later
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

/*
* Modified by the GLib Team and others 1997-2000. See the AUTHORS
* file for a list of people on the GLib Team. See the ChangeLog
* files for a list of changes. These files are distributed with
* GLib at ftp://ftp.gtk.org/pub/gtk/.
*/

/*
* QTree is a partial import of Glib's GTree. The parts excluded correspond
* to API calls either deprecated (e.g. g_tree_traverse) or recently added
* (e.g. g_tree_search_node, added in 2.68); neither have callers in QEMU.
*
* The reason for this import is to allow us to control the memory allocator
* used by the tree implementation. Until Glib 2.75.3, GTree uses Glib's
* slice allocator, which causes problems when forking in user-mode;
* see https://gitlab.com/qemu-project/qemu/-/issues/285 and glib's
* "45b5a6c1e gslice: Remove slice allocator and use malloc() instead".
*
* TODO: remove QTree when QEMU's minimum Glib version is >= 2.75.3.
*/

#ifndef QEMU_QTREE_H
#define QEMU_QTREE_H

#include "qemu/osdep.h"

#ifdef HAVE_GLIB_WITH_SLICE_ALLOCATOR

typedef struct _QTree QTree;

typedef struct _QTreeNode QTreeNode;

typedef gboolean (*QTraverseNodeFunc)(QTreeNode *node,
gpointer user_data);

/*
* Balanced binary trees
*/
QTree *q_tree_new(GCompareFunc key_compare_func);
QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
gpointer key_compare_data);
QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
gpointer key_compare_data,
GDestroyNotify key_destroy_func,
GDestroyNotify value_destroy_func);
QTree *q_tree_ref(QTree *tree);
void q_tree_unref(QTree *tree);
void q_tree_destroy(QTree *tree);
void q_tree_insert(QTree *tree,
gpointer key,
gpointer value);
void q_tree_replace(QTree *tree,
gpointer key,
gpointer value);
gboolean q_tree_remove(QTree *tree,
gconstpointer key);
gboolean q_tree_steal(QTree *tree,
gconstpointer key);
gpointer q_tree_lookup(QTree *tree,
gconstpointer key);
gboolean q_tree_lookup_extended(QTree *tree,
gconstpointer lookup_key,
gpointer *orig_key,
gpointer *value);
void q_tree_foreach(QTree *tree,
GTraverseFunc func,
gpointer user_data);
gpointer q_tree_search(QTree *tree,
GCompareFunc search_func,
gconstpointer user_data);
gint q_tree_height(QTree *tree);
gint q_tree_nnodes(QTree *tree);

#else /* !HAVE_GLIB_WITH_SLICE_ALLOCATOR */

typedef GTree QTree;
typedef GTreeNode QTreeNode;
typedef GTraverseNodeFunc QTraverseNodeFunc;

/* Fallback when GLib's slice allocator is gone: QTree is just GTree. */
static inline QTree *q_tree_new(GCompareFunc key_compare_func)
{
    return g_tree_new(key_compare_func);
}

/* Create a tree whose compare function receives @key_compare_data. */
static inline QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
                                          gpointer key_compare_data)
{
    return g_tree_new_with_data(key_compare_func, key_compare_data);
}

/* Create a tree with destroy notifiers for both keys and values. */
static inline QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
                                     gpointer key_compare_data,
                                     GDestroyNotify key_destroy_func,
                                     GDestroyNotify value_destroy_func)
{
    return g_tree_new_full(key_compare_func, key_compare_data,
                           key_destroy_func, value_destroy_func);
}

/* Take an additional reference on @tree; returns @tree for chaining. */
static inline QTree *q_tree_ref(QTree *tree)
{
    return g_tree_ref(tree);
}

/* Drop a reference; the tree is freed when the count reaches zero. */
static inline void q_tree_unref(QTree *tree)
{
    g_tree_unref(tree);
}

/* Remove all nodes and release the caller's reference on the tree. */
static inline void q_tree_destroy(QTree *tree)
{
    g_tree_destroy(tree);
}

/* Insert a key/value pair into the tree. */
static inline void q_tree_insert(QTree *tree,
                                 gpointer key,
                                 gpointer value)
{
    g_tree_insert(tree, key, value);
}

/* Like insert, but an existing entry has both key and value replaced. */
static inline void q_tree_replace(QTree *tree,
                                  gpointer key,
                                  gpointer value)
{
    g_tree_replace(tree, key, value);
}

/* Remove @key from the tree; returns TRUE if the key was found. */
static inline gboolean q_tree_remove(QTree *tree,
                                     gconstpointer key)
{
    return g_tree_remove(tree, key);
}

/* Remove @key without invoking the key/value destroy notifiers. */
static inline gboolean q_tree_steal(QTree *tree,
                                    gconstpointer key)
{
    return g_tree_steal(tree, key);
}

/* Look up the value stored for @key, or NULL if not present. */
static inline gpointer q_tree_lookup(QTree *tree,
                                     gconstpointer key)
{
    return g_tree_lookup(tree, key);
}

/* Lookup that also retrieves the original key pointer stored in the tree. */
static inline gboolean q_tree_lookup_extended(QTree *tree,
                                              gconstpointer lookup_key,
                                              gpointer *orig_key,
                                              gpointer *value)
{
    return g_tree_lookup_extended(tree, lookup_key, orig_key, value);
}

/*
 * Call @func for each key/value pair in @tree.
 *
 * Fix: g_tree_foreach() returns void, and C does not permit
 * 'return expression;' in a function whose return type is void
 * (C11 6.8.6.4p1) — call it as a plain statement, matching the
 * other void wrappers (q_tree_unref, q_tree_destroy, q_tree_insert).
 */
static inline void q_tree_foreach(QTree *tree,
                                  GTraverseFunc func,
                                  gpointer user_data)
{
    g_tree_foreach(tree, func, user_data);
}

/* Search the tree using @search_func, which directs the descent. */
static inline gpointer q_tree_search(QTree *tree,
                                     GCompareFunc search_func,
                                     gconstpointer user_data)
{
    return g_tree_search(tree, search_func, user_data);
}

/* Height of the tree (0 for an empty tree). */
static inline gint q_tree_height(QTree *tree)
{
    return g_tree_height(tree);
}

/* Number of key/value pairs currently stored in the tree. */
static inline gint q_tree_nnodes(QTree *tree)
{
    return g_tree_nnodes(tree);
}

#endif /* HAVE_GLIB_WITH_SLICE_ALLOCATOR */

#endif /* QEMU_QTREE_H */
4 changes: 3 additions & 1 deletion include/sysemu/block-backend-io.h
Expand Up @@ -70,10 +70,12 @@ void co_wrapper blk_eject(BlockBackend *blk, bool eject_flag);
int64_t coroutine_fn blk_co_getlength(BlockBackend *blk);
int64_t co_wrapper_mixed blk_getlength(BlockBackend *blk);

void coroutine_fn blk_co_get_geometry(BlockBackend *blk,
uint64_t *nb_sectors_ptr);
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);

int64_t coroutine_fn blk_co_nb_sectors(BlockBackend *blk);
int64_t co_wrapper_mixed blk_nb_sectors(BlockBackend *blk);
int64_t blk_nb_sectors(BlockBackend *blk);

void *blk_try_blockalign(BlockBackend *blk, size_t size);
void *blk_blockalign(BlockBackend *blk, size_t size);
Expand Down
4 changes: 4 additions & 0 deletions include/sysemu/qtest.h
Expand Up @@ -14,6 +14,7 @@
#ifndef QTEST_H
#define QTEST_H

#include "chardev/char.h"

extern bool qtest_allowed;

Expand All @@ -22,6 +23,9 @@ static inline bool qtest_enabled(void)
return qtest_allowed;
}

void qtest_send_prefix(CharBackend *chr);
void G_GNUC_PRINTF(2, 3) qtest_sendf(CharBackend *chr, const char *fmt, ...);
void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words));
bool qtest_driver(void);

void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **errp);
Expand Down
3 changes: 3 additions & 0 deletions include/sysemu/tpm.h
Expand Up @@ -48,6 +48,7 @@ struct TPMIfClass {
#define TYPE_TPM_TIS_SYSBUS "tpm-tis-device"
#define TYPE_TPM_CRB "tpm-crb"
#define TYPE_TPM_SPAPR "tpm-spapr"
#define TYPE_TPM_TIS_I2C "tpm-tis-i2c"

#define TPM_IS_TIS_ISA(chr) \
object_dynamic_cast(OBJECT(chr), TYPE_TPM_TIS_ISA)
Expand All @@ -57,6 +58,8 @@ struct TPMIfClass {
object_dynamic_cast(OBJECT(chr), TYPE_TPM_CRB)
#define TPM_IS_SPAPR(chr) \
object_dynamic_cast(OBJECT(chr), TYPE_TPM_SPAPR)
#define TPM_IS_TIS_I2C(chr) \
object_dynamic_cast(OBJECT(chr), TYPE_TPM_TIS_I2C)

/* returns NULL unless there is exactly one TPM device */
static inline TPMIf *tpm_find(void)
Expand Down
6 changes: 0 additions & 6 deletions include/tcg/tcg.h
Expand Up @@ -967,12 +967,6 @@ typedef struct TCGTargetOpDef {
const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;

#define tcg_abort() \
do {\
fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
abort();\
} while (0)

bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
Expand Down
3 changes: 3 additions & 0 deletions io/channel-tls.c
Expand Up @@ -74,6 +74,9 @@ qio_channel_tls_new_server(QIOChannel *master,
ioc = QIO_CHANNEL_TLS(object_new(TYPE_QIO_CHANNEL_TLS));

ioc->master = master;
if (qio_channel_has_feature(master, QIO_CHANNEL_FEATURE_SHUTDOWN)) {
qio_channel_set_feature(QIO_CHANNEL(ioc), QIO_CHANNEL_FEATURE_SHUTDOWN);
}
object_ref(OBJECT(master));

ioc->session = qcrypto_tls_session_new(
Expand Down
78 changes: 39 additions & 39 deletions io/channel.c
Expand Up @@ -109,27 +109,27 @@ ssize_t qio_channel_writev_full(QIOChannel *ioc,
}


int qio_channel_readv_all_eof(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
Error **errp)
/*
 * Read until the whole iovec is filled or end-of-file is reached.
 * Thin wrapper around qio_channel_readv_full_all_eof() for callers
 * that do not expect to receive any file descriptors.
 * Returns 1 if all bytes were read, 0 on EOF without data, -1 on error.
 */
int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc,
                                                 const struct iovec *iov,
                                                 size_t niov,
                                                 Error **errp)
{
    return qio_channel_readv_full_all_eof(ioc, iov, niov, NULL, NULL, errp);
}

int qio_channel_readv_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
Error **errp)
/*
 * Read until the whole iovec is filled; premature EOF is an error.
 * Thin wrapper around qio_channel_readv_full_all() with no fd passing.
 * Returns 0 if all bytes were read, -1 on error.
 */
int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc,
                                             const struct iovec *iov,
                                             size_t niov,
                                             Error **errp)
{
    return qio_channel_readv_full_all(ioc, iov, niov, NULL, NULL, errp);
}

int qio_channel_readv_full_all_eof(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds, size_t *nfds,
Error **errp)
int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds, size_t *nfds,
Error **errp)
{
int ret = -1;
struct iovec *local_iov = g_new(struct iovec, niov);
Expand Down Expand Up @@ -215,11 +215,11 @@ int qio_channel_readv_full_all_eof(QIOChannel *ioc,
return ret;
}

int qio_channel_readv_full_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds, size_t *nfds,
Error **errp)
int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int **fds, size_t *nfds,
Error **errp)
{
int ret = qio_channel_readv_full_all_eof(ioc, iov, niov, fds, nfds, errp);

Expand All @@ -234,19 +234,19 @@ int qio_channel_readv_full_all(QIOChannel *ioc,
return ret;
}

int qio_channel_writev_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
Error **errp)
int coroutine_mixed_fn qio_channel_writev_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
Error **errp)
{
return qio_channel_writev_full_all(ioc, iov, niov, NULL, 0, 0, errp);
}

int qio_channel_writev_full_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int *fds, size_t nfds,
int flags, Error **errp)
int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc,
const struct iovec *iov,
size_t niov,
int *fds, size_t nfds,
int flags, Error **errp)
{
int ret = -1;
struct iovec *local_iov = g_new(struct iovec, niov);
Expand Down Expand Up @@ -325,30 +325,30 @@ ssize_t qio_channel_write(QIOChannel *ioc,
}


int qio_channel_read_all_eof(QIOChannel *ioc,
char *buf,
size_t buflen,
Error **errp)
int coroutine_mixed_fn qio_channel_read_all_eof(QIOChannel *ioc,
char *buf,
size_t buflen,
Error **errp)
{
struct iovec iov = { .iov_base = buf, .iov_len = buflen };
return qio_channel_readv_all_eof(ioc, &iov, 1, errp);
}


int qio_channel_read_all(QIOChannel *ioc,
char *buf,
size_t buflen,
Error **errp)
int coroutine_mixed_fn qio_channel_read_all(QIOChannel *ioc,
char *buf,
size_t buflen,
Error **errp)
{
struct iovec iov = { .iov_base = buf, .iov_len = buflen };
return qio_channel_readv_all(ioc, &iov, 1, errp);
}


int qio_channel_write_all(QIOChannel *ioc,
const char *buf,
size_t buflen,
Error **errp)
int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc,
const char *buf,
size_t buflen,
Error **errp)
{
struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen };
return qio_channel_writev_all(ioc, &iov, 1, errp);
Expand Down
2 changes: 1 addition & 1 deletion linux-user/arm/target_cpu.h
Expand Up @@ -30,7 +30,7 @@ static inline unsigned long arm_max_reserved_va(CPUState *cs)
* the high addresses. Restrict linux-user to the
* cached write-back RAM in the system map.
*/
return 0x80000000ul;
return 0x7ffffffful;
} else {
/*
* We need to be able to map the commpage.
Expand Down
37 changes: 19 additions & 18 deletions linux-user/elfload.c
Expand Up @@ -208,12 +208,12 @@ static bool init_guest_commpage(void)
* has specified -R reserved_va, which would trigger an assert().
*/
if (reserved_va != 0 &&
TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
error_report("Cannot allocate vsyscall page");
exit(EXIT_FAILURE);
}
page_set_flags(TARGET_VSYSCALL_PAGE,
TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
PAGE_EXEC | PAGE_VALID);
return true;
}
Expand Down Expand Up @@ -444,7 +444,7 @@ static bool init_guest_commpage(void)
exit(EXIT_FAILURE);
}

page_set_flags(commpage, commpage + qemu_host_page_size,
page_set_flags(commpage, commpage | ~qemu_host_page_mask,
PAGE_READ | PAGE_EXEC | PAGE_VALID);
return true;
}
Expand Down Expand Up @@ -1316,7 +1316,7 @@ static bool init_guest_commpage(void)
exit(EXIT_FAILURE);
}

page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
PAGE_READ | PAGE_EXEC | PAGE_VALID);
return true;
}
Expand Down Expand Up @@ -1728,7 +1728,7 @@ static bool init_guest_commpage(void)
* and implement syscalls. Here, simply mark the page executable.
* Special case the entry points during translation (see do_page_zero).
*/
page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
PAGE_EXEC | PAGE_VALID);
return true;
}
Expand Down Expand Up @@ -2209,7 +2209,8 @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)

/* Ensure that the bss page(s) are valid */
if ((page_get_flags(last_bss-1) & prot) != prot) {
page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss - 1,
prot | PAGE_VALID);
}

if (host_start < host_map_start) {
Expand Down Expand Up @@ -2511,7 +2512,7 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) {
error_report("%s: requires more virtual address space "
"than the host can provide (0x%" PRIx64 ")",
image_name, (uint64_t)guest_hiaddr - guest_base);
image_name, (uint64_t)guest_hiaddr + 1 - guest_base);
exit(EXIT_FAILURE);
}
#endif
Expand All @@ -2529,13 +2530,13 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,

/* Reserve the address space for the binary, or reserved_va. */
test = g2h_untagged(guest_loaddr);
addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
addr = mmap(test, guest_hiaddr - guest_loaddr + 1, PROT_NONE, flags, -1, 0);
if (test != addr) {
pgb_fail_in_use(image_name);
}
qemu_log_mask(CPU_LOG_PAGE,
"%s: base @ %p for " TARGET_ABI_FMT_ld " bytes\n",
__func__, addr, guest_hiaddr - guest_loaddr);
"%s: base @ %p for %" PRIu64 " bytes\n",
__func__, addr, (uint64_t)guest_hiaddr - guest_loaddr + 1);
}

/**
Expand Down Expand Up @@ -2679,7 +2680,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
if (hiaddr != orig_hiaddr) {
error_report("%s: requires virtual address space that the "
"host cannot provide (0x%" PRIx64 ")",
image_name, (uint64_t)orig_hiaddr);
image_name, (uint64_t)orig_hiaddr + 1);
exit(EXIT_FAILURE);
}

Expand All @@ -2693,15 +2694,15 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
* arithmetic wraps around.
*/
if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
hiaddr = (uintptr_t) 4 << 30;
hiaddr = UINT32_MAX;
} else {
offset = -(HI_COMMPAGE & -align);
}
} else if (LO_COMMPAGE != -1) {
loaddr = MIN(loaddr, LO_COMMPAGE & -align);
}

addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset);
addr = pgb_find_hole(loaddr, hiaddr - loaddr + 1, align, offset);
if (addr == -1) {
/*
* If HI_COMMPAGE, there *might* be a non-consecutive allocation
Expand Down Expand Up @@ -2767,17 +2768,17 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
/* Reserve the memory on the host. */
assert(guest_base != 0);
test = g2h_untagged(0);
addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
addr = mmap(test, reserved_va + 1, PROT_NONE, flags, -1, 0);
if (addr == MAP_FAILED || addr != test) {
error_report("Unable to reserve 0x%lx bytes of virtual address "
"space at %p (%s) for use as guest address space (check your "
"virtual memory ulimit setting, min_mmap_addr or reserve less "
"using -R option)", reserved_va, test, strerror(errno));
"using -R option)", reserved_va + 1, test, strerror(errno));
exit(EXIT_FAILURE);
}

qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n",
__func__, addr, reserved_va);
__func__, addr, reserved_va + 1);
}

void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
Expand Down Expand Up @@ -3020,7 +3021,7 @@ static void load_elf_image(const char *image_name, int image_fd,
if (a < loaddr) {
loaddr = a;
}
a = eppnt->p_vaddr + eppnt->p_memsz;
a = eppnt->p_vaddr + eppnt->p_memsz - 1;
if (a > hiaddr) {
hiaddr = a;
}
Expand Down Expand Up @@ -3111,7 +3112,7 @@ static void load_elf_image(const char *image_name, int image_fd,
* In both cases, we will overwrite pages in this range with mappings
* from the executable.
*/
load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
load_addr = target_mmap(loaddr, (size_t)hiaddr - loaddr + 1, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
(ehdr->e_type == ET_EXEC ? MAP_FIXED : 0),
-1, 0);
Expand Down
2 changes: 1 addition & 1 deletion linux-user/flatload.c
Expand Up @@ -448,7 +448,7 @@ static int load_flat_file(struct linux_binprm * bprm,
* Allocate the address space.
*/
probe_guest_base(bprm->filename, 0,
text_len + data_len + extra + indx_len);
text_len + data_len + extra + indx_len - 1);

/*
* there are a couple of cases here, the separate code/data
Expand Down
31 changes: 18 additions & 13 deletions linux-user/main.c
Expand Up @@ -109,11 +109,9 @@ static const char *last_log_filename;
# if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
# if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
(TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
/* There are a number of places where we assign reserved_va to a variable
of type abi_ulong and expect it to fit. Avoid the last page. */
# define MAX_RESERVED_VA(CPU) (0xfffffffful & TARGET_PAGE_MASK)
# define MAX_RESERVED_VA(CPU) 0xfffffffful
# else
# define MAX_RESERVED_VA(CPU) (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
# define MAX_RESERVED_VA(CPU) ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
# endif
# else
# define MAX_RESERVED_VA(CPU) 0
Expand Down Expand Up @@ -379,7 +377,9 @@ static void handle_arg_reserved_va(const char *arg)
{
char *p;
int shift = 0;
reserved_va = strtoul(arg, &p, 0);
unsigned long val;

val = strtoul(arg, &p, 0);
switch (*p) {
case 'k':
case 'K':
Expand All @@ -393,10 +393,10 @@ static void handle_arg_reserved_va(const char *arg)
break;
}
if (shift) {
unsigned long unshifted = reserved_va;
unsigned long unshifted = val;
p++;
reserved_va <<= shift;
if (reserved_va >> shift != unshifted) {
val <<= shift;
if (val >> shift != unshifted) {
fprintf(stderr, "Reserved virtual address too big\n");
exit(EXIT_FAILURE);
}
Expand All @@ -405,6 +405,8 @@ static void handle_arg_reserved_va(const char *arg)
fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
exit(EXIT_FAILURE);
}
/* The representation is size - 1, with 0 remaining "default". */
reserved_va = val ? val - 1 : 0;
}

static void handle_arg_singlestep(const char *arg)
Expand Down Expand Up @@ -793,16 +795,19 @@ int main(int argc, char **argv, char **envp)
*/
max_reserved_va = MAX_RESERVED_VA(cpu);
if (reserved_va != 0) {
if ((reserved_va + 1) % qemu_host_page_size) {
char *s = size_to_str(qemu_host_page_size);
fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s);
g_free(s);
exit(EXIT_FAILURE);
}
if (max_reserved_va && reserved_va > max_reserved_va) {
fprintf(stderr, "Reserved virtual address too big\n");
exit(EXIT_FAILURE);
}
} else if (HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32) {
/*
* reserved_va must be aligned with the host page size
* as it is used with mmap()
*/
reserved_va = max_reserved_va & qemu_host_page_mask;
/* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
reserved_va = max_reserved_va;
}

{
Expand Down
3 changes: 3 additions & 0 deletions linux-user/mips/target_elf.h
Expand Up @@ -15,6 +15,9 @@ static inline const char *cpu_get_model(uint32_t eflags)
if ((eflags & EF_MIPS_MACH) == EF_MIPS_MACH_5900) {
return "R5900";
}
if (eflags & EF_MIPS_NAN2008) {
return "P5600";
}
return "24Kf";
}
#endif
22 changes: 11 additions & 11 deletions linux-user/mmap.c
Expand Up @@ -181,7 +181,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
}
}

page_set_flags(start, start + len, page_flags);
page_set_flags(start, start + len - 1, page_flags);
ret = 0;

error:
Expand Down Expand Up @@ -283,7 +283,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
end_addr = start + size;
if (start > reserved_va - size) {
/* Start at the top of the address space. */
end_addr = ((reserved_va - size) & -align) + size;
end_addr = ((reserved_va + 1 - size) & -align) + size;
looped = true;
}

Expand All @@ -297,7 +297,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
return (abi_ulong)-1;
}
/* Re-start at the top of the address space. */
addr = end_addr = ((reserved_va - size) & -align) + size;
addr = end_addr = ((reserved_va + 1 - size) & -align) + size;
looped = true;
} else {
prot = page_get_flags(addr);
Expand Down Expand Up @@ -640,15 +640,15 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
}
page_flags |= PAGE_RESET;
if (passthrough_start == passthrough_end) {
page_set_flags(start, start + len, page_flags);
page_set_flags(start, start + len - 1, page_flags);
} else {
if (start < passthrough_start) {
page_set_flags(start, passthrough_start, page_flags);
page_set_flags(start, passthrough_start - 1, page_flags);
}
page_set_flags(passthrough_start, passthrough_end,
page_set_flags(passthrough_start, passthrough_end - 1,
page_flags | PAGE_PASSTHROUGH);
if (passthrough_end < start + len) {
page_set_flags(passthrough_end, start + len, page_flags);
page_set_flags(passthrough_end, start + len - 1, page_flags);
}
}
the_end:
Expand Down Expand Up @@ -763,7 +763,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
}

if (ret == 0) {
page_set_flags(start, start + len, 0);
page_set_flags(start, start + len - 1, 0);
}
mmap_unlock();
return ret;
Expand Down Expand Up @@ -849,8 +849,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
} else {
new_addr = h2g(host_addr);
prot = page_get_flags(old_addr);
page_set_flags(old_addr, old_addr + old_size, 0);
page_set_flags(new_addr, new_addr + new_size,
page_set_flags(old_addr, old_addr + old_size - 1, 0);
page_set_flags(new_addr, new_addr + new_size - 1,
prot | PAGE_VALID | PAGE_RESET);
}
mmap_unlock();
Expand Down Expand Up @@ -946,7 +946,7 @@ abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
if (can_passthrough_madvise(start, end)) {
ret = get_errno(madvise(g2h_untagged(start), len, advice));
if ((advice == MADV_DONTNEED) && (ret == 0)) {
page_reset_target_data(start, start + len);
page_reset_target_data(start, start + len - 1);
}
}
}
Expand Down
4 changes: 2 additions & 2 deletions linux-user/syscall.c
Expand Up @@ -4595,7 +4595,7 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
}
raddr=h2g((unsigned long)host_raddr);

page_set_flags(raddr, raddr + shm_info.shm_segsz,
page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
PAGE_VALID | PAGE_RESET | PAGE_READ |
(shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

Expand Down Expand Up @@ -4625,7 +4625,7 @@ static inline abi_long do_shmdt(abi_ulong shmaddr)
for (i = 0; i < N_SHM_REGIONS; ++i) {
if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
shm_regions[i].in_use = false;
page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
break;
}
}
Expand Down
2 changes: 1 addition & 1 deletion linux-user/syscall_defs.h
Expand Up @@ -61,7 +61,7 @@

#if (defined(TARGET_I386) && defined(TARGET_ABI32)) \
|| (defined(TARGET_ARM) && defined(TARGET_ABI32)) \
|| defined(TARGET_SPARC) \
|| (defined(TARGET_SPARC) && defined(TARGET_ABI32)) \
|| defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
/* 16 bit uid wrappers emulation */
#define USE_UID16
Expand Down
12 changes: 6 additions & 6 deletions linux-user/user-internals.h
Expand Up @@ -76,19 +76,19 @@ void fork_end(int child);
/**
* probe_guest_base:
* @image_name: the executable being loaded
* @loaddr: the lowest fixed address in the executable
* @hiaddr: the highest fixed address in the executable
* @loaddr: the lowest fixed address within the executable
* @hiaddr: the highest fixed address within the executable
*
* Creates the initial guest address space in the host memory space.
*
* If @loaddr == 0, then no address in the executable is fixed,
* i.e. it is fully relocatable. In that case @hiaddr is the size
* of the executable.
* If @loaddr == 0, then no address in the executable is fixed, i.e.
* it is fully relocatable. In that case @hiaddr is the size of the
* executable minus one.
*
* This function will not return if a valid value for guest_base
* cannot be chosen. On return, the executable loader can expect
*
* target_mmap(loaddr, hiaddr - loaddr, ...)
* target_mmap(loaddr, hiaddr - loaddr + 1, ...)
*
* to succeed.
*/
Expand Down
4 changes: 4 additions & 0 deletions meson.build
Expand Up @@ -508,6 +508,10 @@ glib = declare_dependency(compile_args: config_host['GLIB_CFLAGS'].split(),
})
# override glib dep with the configure results (for subprojects)
meson.override_dependency('glib-2.0', glib)
# pass down whether Glib has the slice allocator
if config_host.has_key('HAVE_GLIB_WITH_SLICE_ALLOCATOR')
config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', true)
endif

gio = not_found
gdbus_codegen = not_found
Expand Down
5 changes: 2 additions & 3 deletions migration/block.c
Expand Up @@ -195,7 +195,7 @@ static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

if (sector < blk_nb_sectors(bmds->blk)) {
if (sector < bmds->total_sectors) {
return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
(1UL << (chunk % (sizeof(unsigned long) * 8))));
} else {
Expand Down Expand Up @@ -229,10 +229,9 @@ static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
BlockBackend *bb = bmds->blk;
int64_t bitmap_size;

bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
bitmap_size = bmds->total_sectors + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

bmds->aio_bitmap = g_malloc0(bitmap_size);
Expand Down
19 changes: 17 additions & 2 deletions migration/migration.c
Expand Up @@ -3464,8 +3464,12 @@ static void migration_completion(MigrationState *s)
qemu_savevm_state_complete_postcopy(s->to_dst_file);
qemu_mutex_unlock_iothread();

/* Shutdown the postcopy fast path thread */
if (migrate_postcopy_preempt()) {
/*
* Shutdown the postcopy fast path thread. This is only needed
* when dest QEMU binary is old (7.1/7.2). QEMU 8.0+ doesn't need
* this.
*/
if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
postcopy_preempt_shutdown_file(s);
}

Expand Down Expand Up @@ -4384,6 +4388,15 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
}
}

/*
* This needs to be done before resuming a postcopy. Note: for newer
* QEMUs we will delay the channel creation until postcopy_start(), to
* avoid disorder of channel creations.
*/
if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
postcopy_preempt_setup(s);
}

if (resume) {
/* Wakeup the main migration thread to do the recovery */
migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
Expand Down Expand Up @@ -4443,6 +4456,8 @@ static Property migration_properties[] = {
decompress_error_check, true),
DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT),
DEFINE_PROP_BOOL("x-preempt-pre-7-2", MigrationState,
preempt_pre_7_2, false),

/* Migration parameters */
DEFINE_PROP_UINT8("x-compress-level", MigrationState,
Expand Down
41 changes: 40 additions & 1 deletion migration/migration.h
Expand Up @@ -65,6 +65,12 @@ typedef struct {
bool all_zero;
} PostcopyTmpPage;

typedef enum {
PREEMPT_THREAD_NONE = 0,
PREEMPT_THREAD_CREATED,
PREEMPT_THREAD_QUIT,
} PreemptThreadStatus;

/* State for the incoming migration */
struct MigrationIncomingState {
QEMUFile *from_src_file;
Expand Down Expand Up @@ -124,7 +130,12 @@ struct MigrationIncomingState {
QemuSemaphore postcopy_qemufile_dst_done;
/* Postcopy priority thread is used to receive postcopy requested pages */
QemuThread postcopy_prio_thread;
bool postcopy_prio_thread_created;
/*
* Always set by the main vm load thread only, but can be read by the
* postcopy preempt thread. "volatile" makes sure all reads will be
* uptodate across cores.
*/
volatile PreemptThreadStatus preempt_thread_status;
/*
* Used to sync between the ram load main thread and the fast ram load
* thread. It protects postcopy_qemufile_dst, which is the postcopy
Expand Down Expand Up @@ -364,6 +375,34 @@ struct MigrationState {
* do not trigger spurious decompression errors.
*/
bool decompress_error_check;
/*
* This variable only affects behavior when postcopy preempt mode is
* enabled.
*
* When set:
*
* - postcopy preempt src QEMU instance will generate an EOS message at
* the end of migration to shut the preempt channel on dest side.
*
* - postcopy preempt channel will be created at the setup phase on src
*   QEMU.
*
* When clear:
*
* - postcopy preempt src QEMU instance will _not_ generate an EOS
* message at the end of migration, the dest qemu will shutdown the
* channel itself.
*
* - postcopy preempt channel will be created at the switching phase
* from precopy -> postcopy (to avoid race condtion of misordered
* creation of channels).
*
* NOTE: See message-id <ZBoShWArKDPpX/D7@work-vm> on qemu-devel
* mailing list for more information on the possible race. Everyone
* should probably just keep this value untouched after set by the
* machine type (or the default).
*/
bool preempt_pre_7_2;

/*
* This decides the size of guest memory chunk that will be used
Expand Down
32 changes: 24 additions & 8 deletions migration/postcopy-ram.c
Expand Up @@ -568,9 +568,14 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
trace_postcopy_ram_incoming_cleanup_entry();

if (mis->postcopy_prio_thread_created) {
if (mis->preempt_thread_status == PREEMPT_THREAD_CREATED) {
/* Notify the fast load thread to quit */
mis->preempt_thread_status = PREEMPT_THREAD_QUIT;
if (mis->postcopy_qemufile_dst) {
qemu_file_shutdown(mis->postcopy_qemufile_dst);
}
qemu_thread_join(&mis->postcopy_prio_thread);
mis->postcopy_prio_thread_created = false;
mis->preempt_thread_status = PREEMPT_THREAD_NONE;
}

if (mis->have_fault_thread) {
Expand Down Expand Up @@ -1203,7 +1208,7 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
*/
postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast",
postcopy_preempt_thread, QEMU_THREAD_JOINABLE);
mis->postcopy_prio_thread_created = true;
mis->preempt_thread_status = PREEMPT_THREAD_CREATED;
}

trace_postcopy_ram_enable_notify();
Expand Down Expand Up @@ -1495,7 +1500,7 @@ static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
return qatomic_mb_read(&incoming_postcopy_state);
return qatomic_load_acquire(&incoming_postcopy_state);
}

/* Set the state and return the old state */
Expand Down Expand Up @@ -1625,8 +1630,14 @@ int postcopy_preempt_establish_channel(MigrationState *s)
return 0;
}

/* Kick off async task to establish preempt channel */
postcopy_preempt_setup(s);
/*
* Kick off async task to establish preempt channel. Only do so with
* 8.0+ machines, because 7.1/7.2 require the channel to be created in
* setup phase of migration (even if racy in an unreliable network).
*/
if (!s->preempt_pre_7_2) {
postcopy_preempt_setup(s);
}

/*
* We need the postcopy preempt channel to be established before
Expand All @@ -1652,6 +1663,11 @@ static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis)
trace_postcopy_pause_fast_load_continued();
}

/*
 * Whether the postcopy preempt fast-load thread should keep running its
 * service loop.  Becomes false once the main thread has flagged the
 * thread to quit by setting preempt_thread_status to PREEMPT_THREAD_QUIT.
 */
static bool preempt_thread_should_run(MigrationIncomingState *mis)
{
    PreemptThreadStatus status = mis->preempt_thread_status;

    return status != PREEMPT_THREAD_QUIT;
}

void *postcopy_preempt_thread(void *opaque)
{
MigrationIncomingState *mis = opaque;
Expand All @@ -1671,11 +1687,11 @@ void *postcopy_preempt_thread(void *opaque)

/* Sending RAM_SAVE_FLAG_EOS to terminate this thread */
qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
while (1) {
while (preempt_thread_should_run(mis)) {
ret = ram_load_postcopy(mis->postcopy_qemufile_dst,
RAM_CHANNEL_POSTCOPY);
/* If error happened, go into recovery routine */
if (ret) {
if (ret && preempt_thread_should_run(mis)) {
postcopy_pause_ram_fast_load(mis);
} else {
/* We're done */
Expand Down
14 changes: 7 additions & 7 deletions migration/qemu-file.c
Expand Up @@ -392,7 +392,7 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
* case if the underlying file descriptor gives a short read, and that can
* happen even on a blocking fd.
*/
static ssize_t qemu_fill_buffer(QEMUFile *f)
static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f)
{
int len;
int pending;
Expand Down Expand Up @@ -585,7 +585,7 @@ void qemu_file_skip(QEMUFile *f, int size)
* return as many as it managed to read (assuming blocking fd's which
* all current QEMUFile are)
*/
size_t qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset)
size_t coroutine_mixed_fn qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset)
{
ssize_t pending;
size_t index;
Expand Down Expand Up @@ -633,7 +633,7 @@ size_t qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset)
* return as many as it managed to read (assuming blocking fd's which
* all current QEMUFile are)
*/
size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size)
size_t coroutine_mixed_fn qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size)
{
size_t pending = size;
size_t done = 0;
Expand Down Expand Up @@ -674,7 +674,7 @@ size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size)
* Note: Since **buf may get changed, the caller should take care to
* keep a pointer to the original buffer if it needs to deallocate it.
*/
size_t qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size)
size_t coroutine_mixed_fn qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size)
{
if (size < IO_BUF_SIZE) {
size_t res;
Expand All @@ -696,7 +696,7 @@ size_t qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size)
* Peeks a single byte from the buffer; this isn't guaranteed to work if
* offset leaves a gap after the previous read/peeked data.
*/
int qemu_peek_byte(QEMUFile *f, int offset)
int coroutine_mixed_fn qemu_peek_byte(QEMUFile *f, int offset)
{
int index = f->buf_index + offset;

Expand All @@ -713,7 +713,7 @@ int qemu_peek_byte(QEMUFile *f, int offset)
return f->buf[index];
}

int qemu_get_byte(QEMUFile *f)
int coroutine_mixed_fn qemu_get_byte(QEMUFile *f)
{
int result;

Expand Down Expand Up @@ -894,7 +894,7 @@ int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src)
* else 0
* (Note a 0 length string will return 0 either way)
*/
size_t qemu_get_counted_string(QEMUFile *f, char buf[256])
size_t coroutine_fn qemu_get_counted_string(QEMUFile *f, char buf[256])
{
size_t len = qemu_get_byte(f);
size_t res = qemu_get_buffer(f, (uint8_t *)buf, len);
Expand Down
6 changes: 3 additions & 3 deletions migration/qemu-file.h
Expand Up @@ -108,8 +108,8 @@ bool qemu_file_is_writable(QEMUFile *f);

#include "migration/qemu-file-types.h"

size_t qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset);
size_t qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size);
size_t coroutine_mixed_fn qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset);
size_t coroutine_mixed_fn qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size);
ssize_t qemu_put_compression_data(QEMUFile *f, z_stream *stream,
const uint8_t *p, size_t size);
int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src);
Expand All @@ -119,7 +119,7 @@ int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src);
* is; you aren't guaranteed to be able to peak to +n bytes unless you've
* previously peeked +n-1.
*/
int qemu_peek_byte(QEMUFile *f, int offset);
int coroutine_mixed_fn qemu_peek_byte(QEMUFile *f, int offset);
void qemu_file_skip(QEMUFile *f, int size);
/*
* qemu_file_credit_transfer:
Expand Down