802 changes: 792 additions & 10 deletions hw/nvme/ctrl.c

Large diffs are not rendered by default.

147 changes: 147 additions & 0 deletions hw/nvme/ns.c
@@ -14,8 +14,10 @@

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"

@@ -377,6 +379,130 @@ static void nvme_zoned_ns_shutdown(NvmeNamespace *ns)
assert(ns->nr_open_zones == 0);
}

static NvmeRuHandle *nvme_find_ruh_by_attr(NvmeEnduranceGroup *endgrp,
uint8_t ruha, uint16_t *ruhid)
{
for (uint16_t i = 0; i < endgrp->fdp.nruh; i++) {
NvmeRuHandle *ruh = &endgrp->fdp.ruhs[i];

if (ruh->ruha == ruha) {
*ruhid = i;
return ruh;
}
}

return NULL;
}

static bool nvme_ns_init_fdp(NvmeNamespace *ns, Error **errp)
{
NvmeEnduranceGroup *endgrp = ns->endgrp;
NvmeRuHandle *ruh;
uint8_t lbafi = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
unsigned int *ruhid, *ruhids;
char *r, *p, *token;
uint16_t *ph;

if (!ns->params.fdp.ruhs) {
ns->fdp.nphs = 1;
ph = ns->fdp.phs = g_new(uint16_t, 1);

ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_CTRL, ph);
if (!ruh) {
ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_UNUSED, ph);
if (!ruh) {
error_setg(errp, "no unused reclaim unit handles left");
return false;
}

ruh->ruha = NVME_RUHA_CTRL;
ruh->lbafi = lbafi;
ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds;

for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) {
ruh->rus[rg].ruamw = ruh->ruamw;
}
} else if (ruh->lbafi != lbafi) {
error_setg(errp, "lba format index of controller assigned "
"reclaim unit handle does not match namespace lba "
"format index");
return false;
}

return true;
}

ruhid = ruhids = g_new0(unsigned int, endgrp->fdp.nruh);
r = p = strdup(ns->params.fdp.ruhs);

/* parse the placement handle identifiers */
while ((token = qemu_strsep(&p, ";")) != NULL) {
ns->fdp.nphs += 1;
if (ns->fdp.nphs > NVME_FDP_MAXPIDS ||
ns->fdp.nphs == endgrp->fdp.nruh) {
error_setg(errp, "too many placement handles");
free(r);
return false;
}

if (qemu_strtoui(token, NULL, 0, ruhid++) < 0) {
error_setg(errp, "cannot parse reclaim unit handle identifier");
free(r);
return false;
}
}

free(r);

ph = ns->fdp.phs = g_new(uint16_t, ns->fdp.nphs);

ruhid = ruhids;

/* verify the identifiers */
for (unsigned int i = 0; i < ns->fdp.nphs; i++, ruhid++, ph++) {
if (*ruhid >= endgrp->fdp.nruh) {
error_setg(errp, "invalid reclaim unit handle identifier");
return false;
}

ruh = &endgrp->fdp.ruhs[*ruhid];

switch (ruh->ruha) {
case NVME_RUHA_UNUSED:
ruh->ruha = NVME_RUHA_HOST;
ruh->lbafi = lbafi;
ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds;

for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) {
ruh->rus[rg].ruamw = ruh->ruamw;
}

break;

case NVME_RUHA_HOST:
if (ruh->lbafi != lbafi) {
error_setg(errp, "lba format index of host assigned"
"reclaim unit handle does not match namespace "
"lba format index");
return false;
}

break;

case NVME_RUHA_CTRL:
error_setg(errp, "reclaim unit handle is controller assigned");
return false;

default:
abort();
}

*ph = *ruhid;
}

return true;
}

static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp)
{
unsigned int pi_size;
@@ -417,6 +543,11 @@ static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp)
return -1;
}

if (ns->params.zoned && ns->endgrp && ns->endgrp->fdp.enabled) {
error_setg(errp, "cannot be a zoned- in an FDP configuration");
return -1;
}

if (ns->params.zoned) {
if (ns->params.max_active_zones) {
if (ns->params.max_open_zones > ns->params.max_active_zones) {
@@ -502,6 +633,12 @@ int nvme_ns_setup(NvmeNamespace *ns, Error **errp)
nvme_ns_init_zoned(ns);
}

if (ns->endgrp && ns->endgrp->fdp.enabled) {
if (!nvme_ns_init_fdp(ns, errp)) {
return -1;
}
}

return 0;
}

@@ -525,6 +662,10 @@ void nvme_ns_cleanup(NvmeNamespace *ns)
g_free(ns->zone_array);
g_free(ns->zd_extensions);
}

if (ns->endgrp && ns->endgrp->fdp.enabled) {
g_free(ns->fdp.phs);
}
}

static void nvme_ns_unrealize(DeviceState *dev)
@@ -561,6 +702,8 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) {
return;
}
ns->subsys = subsys;
ns->endgrp = &subsys->endgrp;
}

if (nvme_ns_setup(ns, errp)) {
@@ -591,6 +734,8 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
if (subsys) {
subsys->namespaces[nsid] = ns;

ns->id_ns.endgid = cpu_to_le16(0x1);

if (ns->params.detached) {
return;
}
@@ -606,6 +751,7 @@

return;
}

}

nvme_attach_ns(n, ns);
@@ -644,6 +790,7 @@ static Property nvme_ns_props[] = {
DEFINE_PROP_SIZE("zoned.zrwafg", NvmeNamespace, params.zrwafg, -1),
DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default,
false),
DEFINE_PROP_STRING("fdp.ruhs", NvmeNamespace, params.fdp.ruhs),
DEFINE_PROP_END_OF_LIST(),
};

92 changes: 90 additions & 2 deletions hw/nvme/nvme.h
@@ -27,6 +27,8 @@
#define NVME_MAX_CONTROLLERS 256
#define NVME_MAX_NAMESPACES 256
#define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000)
#define NVME_FDP_MAX_EVENTS 63
#define NVME_FDP_MAXPIDS 128

QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_NSID_BROADCAST - 1);

@@ -45,17 +47,68 @@ typedef struct NvmeBus {
OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
#define SUBSYS_SLOT_RSVD (void *)0xFFFF

typedef struct NvmeReclaimUnit {
uint64_t ruamw;
} NvmeReclaimUnit;

typedef struct NvmeRuHandle {
uint8_t ruht;
uint8_t ruha;
uint64_t event_filter;
uint8_t lbafi;
uint64_t ruamw;

/* reclaim units indexed by reclaim group */
NvmeReclaimUnit *rus;
} NvmeRuHandle;

typedef struct NvmeFdpEventBuffer {
NvmeFdpEvent events[NVME_FDP_MAX_EVENTS];
unsigned int nelems;
unsigned int start;
unsigned int next;
} NvmeFdpEventBuffer;
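
The buffer is a fixed-size ring: 'start' is the read index, 'next' the write index and 'nelems' the fill level. The producer/consumer logic lives in hw/nvme/ctrl.c, whose diff is not rendered above, so the following is only a sketch of the intended semantics, overwriting the oldest event once the ring is full:

static inline void nvme_fdp_event_push(NvmeFdpEventBuffer *ebuf,
                                       const NvmeFdpEvent *ev)
{
    ebuf->events[ebuf->next] = *ev;
    ebuf->next = (ebuf->next + 1) % NVME_FDP_MAX_EVENTS;

    if (ebuf->nelems < NVME_FDP_MAX_EVENTS) {
        ebuf->nelems++;
    } else {
        /* ring full: advance the read index, dropping the oldest event */
        ebuf->start = (ebuf->start + 1) % NVME_FDP_MAX_EVENTS;
    }
}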

typedef struct NvmeEnduranceGroup {
uint8_t event_conf;

struct {
NvmeFdpEventBuffer host_events, ctrl_events;

uint16_t nruh;
uint16_t nrg;
uint8_t rgif;
uint64_t runs;

uint64_t hbmw;
uint64_t mbmw;
uint64_t mbe;

bool enabled;

NvmeRuHandle *ruhs;
} fdp;
} NvmeEnduranceGroup;

typedef struct NvmeSubsystem {
DeviceState parent_obj;
NvmeBus bus;
uint8_t subnqn[256];
char *serial;

NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS];
NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1];
NvmeEnduranceGroup endgrp;

struct {
char *nqn;

struct {
bool enabled;
uint64_t runs;
uint16_t nruh;
uint32_t nrg;
} fdp;
} params;
} NvmeSubsystem;

@@ -96,6 +149,21 @@ typedef struct NvmeZone {
QTAILQ_ENTRY(NvmeZone) entry;
} NvmeZone;

#define FDP_EVT_MAX 0xff
#define NVME_FDP_MAX_NS_RUHS 32u
#define FDPVSS 0

static const uint8_t nvme_fdp_evf_shifts[FDP_EVT_MAX] = {
/* Host events */
[FDP_EVT_RU_NOT_FULLY_WRITTEN] = 0,
[FDP_EVT_RU_ATL_EXCEEDED] = 1,
[FDP_EVT_CTRL_RESET_RUH] = 2,
[FDP_EVT_INVALID_PID] = 3,
/* CTRL events */
[FDP_EVT_MEDIA_REALLOC] = 32,
[FDP_EVT_RUH_IMPLICIT_RU_CHANGE] = 33,
};
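
The table maps an FDP event type to its bit position within a reclaim unit handle's 64-bit event filter (host events in the low word, controller events from bit 32 up). A sketch of the check a consumer would perform -- the real consumer is in ctrl.c, which is not rendered here, and a production version would also validate the type:

static inline bool nvme_fdp_event_enabled(uint64_t event_filter, uint8_t type)
{
    return event_filter & (1ULL << nvme_fdp_evf_shifts[type]);
}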

typedef struct NvmeNamespaceParams {
bool detached;
bool shared;
@@ -125,6 +193,10 @@ typedef struct NvmeNamespaceParams {
uint32_t numzrwa;
uint64_t zrwas;
uint64_t zrwafg;

struct {
char *ruhs;
} fdp;
} NvmeNamespaceParams;

typedef struct NvmeNamespace {
@@ -167,10 +239,18 @@ typedef struct NvmeNamespace {
int32_t nr_active_zones;

NvmeNamespaceParams params;
NvmeSubsystem *subsys;
NvmeEnduranceGroup *endgrp;

struct {
uint32_t err_rec;
} features;

struct {
uint16_t nphs;
/* reclaim unit handle identifiers indexed by placement handle */
uint16_t *phs;
} fdp;
} NvmeNamespace;

static inline uint32_t nvme_nsid(NvmeNamespace *ns)
@@ -274,6 +354,12 @@ static inline void nvme_aor_dec_active(NvmeNamespace *ns)
assert(ns->nr_active_zones >= 0);
}

static inline void nvme_fdp_stat_inc(uint64_t *a, uint64_t b)
{
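/* saturating add: clamp the statistic at UINT64_MAX instead of wrapping */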
uint64_t ret = *a + b;
*a = ret < *a ? UINT64_MAX : ret;
}

void nvme_ns_init_format(NvmeNamespace *ns);
int nvme_ns_setup(NvmeNamespace *ns, Error **errp);
void nvme_ns_drain(NvmeNamespace *ns);
@@ -340,7 +426,9 @@ static inline const char *nvme_adm_opc_str(uint8_t opc)
case NVME_ADM_CMD_GET_FEATURES: return "NVME_ADM_CMD_GET_FEATURES";
case NVME_ADM_CMD_ASYNC_EV_REQ: return "NVME_ADM_CMD_ASYNC_EV_REQ";
case NVME_ADM_CMD_NS_ATTACHMENT: return "NVME_ADM_CMD_NS_ATTACHMENT";
case NVME_ADM_CMD_DIRECTIVE_SEND: return "NVME_ADM_CMD_DIRECTIVE_SEND";
case NVME_ADM_CMD_VIRT_MNGMT: return "NVME_ADM_CMD_VIRT_MNGMT";
case NVME_ADM_CMD_DIRECTIVE_RECV: return "NVME_ADM_CMD_DIRECTIVE_RECV";
case NVME_ADM_CMD_DBBUF_CONFIG: return "NVME_ADM_CMD_DBBUF_CONFIG";
case NVME_ADM_CMD_FORMAT_NVM: return "NVME_ADM_CMD_FORMAT_NVM";
default: return "NVME_ADM_CMD_UNKNOWN";
94 changes: 92 additions & 2 deletions hw/nvme/subsys.c
@@ -7,10 +7,13 @@
*/

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"

#include "nvme.h"

#define NVME_DEFAULT_RU_SIZE (96 * MiB)

static int nvme_subsys_reserve_cntlids(NvmeCtrl *n, int start, int num)
{
NvmeSubsystem *subsys = n->subsys;
@@ -109,13 +112,95 @@ void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n)
n->cntlid = -1;
}

static void nvme_subsys_setup(NvmeSubsystem *subsys)
static bool nvme_calc_rgif(uint16_t nruh, uint16_t nrg, uint8_t *rgif)
{
uint16_t val;
unsigned int i;

if (unlikely(nrg == 1)) {
/* PIDRG_NORGI scenario, all of pid is used for PHID */
*rgif = 0;
return true;
}

val = nrg;
i = 0;
while (val) {
val >>= 1;
i++;
}
*rgif = i;

/* ensure remaining bits suffice to represent number of phids in a RG */
if (unlikely((UINT16_MAX >> i) < nruh)) {
*rgif = 0;
return false;
}

return true;
}
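
The loop computes the bit length of nrg, so nrg = 7 yields rgif = 3, while an exact power of two such as nrg = 16 yields 5 (one bit more than strictly necessary). A standalone sketch of the arithmetic:

#include <assert.h>
#include <stdint.h>

static unsigned bit_length(uint16_t val)
{
    unsigned i = 0;

    while (val) {
        val >>= 1;
        i++;
    }
    return i;
}

int main(void)
{
    assert(bit_length(7) == 3);   /* reclaim group ids 0..6 fit in 3 bits */
    assert(bit_length(16) == 5);  /* bit length of 16; 4 bits would suffice */
    return 0;
}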

static bool nvme_subsys_setup_fdp(NvmeSubsystem *subsys, Error **errp)
{
NvmeEnduranceGroup *endgrp = &subsys->endgrp;

if (!subsys->params.fdp.runs) {
error_setg(errp, "fdp.runs must be non-zero");
return false;
}

endgrp->fdp.runs = subsys->params.fdp.runs;

if (!subsys->params.fdp.nrg) {
error_setg(errp, "fdp.nrg must be non-zero");
return false;
}

endgrp->fdp.nrg = subsys->params.fdp.nrg;

if (!subsys->params.fdp.nruh) {
error_setg(errp, "fdp.nruh must be non-zero");
return false;
}

endgrp->fdp.nruh = subsys->params.fdp.nruh;

if (!nvme_calc_rgif(endgrp->fdp.nruh, endgrp->fdp.nrg, &endgrp->fdp.rgif)) {
error_setg(errp,
"cannot derive a valid rgif (nruh %"PRIu16" nrg %"PRIu32")",
endgrp->fdp.nruh, endgrp->fdp.nrg);
return false;
}

endgrp->fdp.ruhs = g_new(NvmeRuHandle, endgrp->fdp.nruh);

for (uint16_t ruhid = 0; ruhid < endgrp->fdp.nruh; ruhid++) {
endgrp->fdp.ruhs[ruhid] = (NvmeRuHandle) {
.ruht = NVME_RUHT_INITIALLY_ISOLATED,
.ruha = NVME_RUHA_UNUSED,
};

endgrp->fdp.ruhs[ruhid].rus = g_new(NvmeReclaimUnit, endgrp->fdp.nrg);
}

endgrp->fdp.enabled = true;

return true;
}

static bool nvme_subsys_setup(NvmeSubsystem *subsys, Error **errp)
{
const char *nqn = subsys->params.nqn ?
subsys->params.nqn : subsys->parent_obj.id;

snprintf((char *)subsys->subnqn, sizeof(subsys->subnqn),
"nqn.2019-08.org.qemu:%s", nqn);

if (subsys->params.fdp.enabled && !nvme_subsys_setup_fdp(subsys, errp)) {
return false;
}

return true;
}

static void nvme_subsys_realize(DeviceState *dev, Error **errp)
@@ -124,11 +209,16 @@ static void nvme_subsys_realize(DeviceState *dev, Error **errp)

qbus_init(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id);

nvme_subsys_setup(subsys);
nvme_subsys_setup(subsys, errp);
}

static Property nvme_subsystem_props[] = {
DEFINE_PROP_STRING("nqn", NvmeSubsystem, params.nqn),
DEFINE_PROP_BOOL("fdp", NvmeSubsystem, params.fdp.enabled, false),
DEFINE_PROP_SIZE("fdp.runs", NvmeSubsystem, params.fdp.runs,
NVME_DEFAULT_RU_SIZE),
DEFINE_PROP_UINT32("fdp.nrg", NvmeSubsystem, params.fdp.nrg, 1),
DEFINE_PROP_UINT16("fdp.nruh", NvmeSubsystem, params.fdp.nruh, 0),
DEFINE_PROP_END_OF_LIST(),
};
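
Together with the fdp.ruhs namespace property added in ns.c above, these properties let an FDP-enabled topology be described on the command line. A sketch (ids, serial and drive names are invented; fdp.runs defaults to 96 MiB):

-device nvme-subsys,id=nvme-subsys-0,nqn=subsys0,fdp=on,fdp.runs=96M,fdp.nruh=16,fdp.nrg=1
-device nvme,serial=deadbeef,subsys=nvme-subsys-0
-drive id=nvm-1,file=nvm-1.img,format=raw,if=none
-device nvme-ns,drive=nvm-1,nsid=1,subsys=nvme-subsys-0,fdp.ruhs="1;2;3"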

1 change: 1 addition & 0 deletions hw/nvme/trace-events
@@ -117,6 +117,7 @@ pci_nvme_clear_ns_reset(uint32_t state, uint64_t slba) "zone state=%"PRIu32", sl
pci_nvme_zoned_zrwa_implicit_flush(uint64_t zslba, uint32_t nlb) "zslba 0x%"PRIx64" nlb %"PRIu32""
pci_nvme_pci_reset(void) "PCI Function Level Reset"
pci_nvme_virt_mngmt(uint16_t cid, uint16_t act, uint16_t cntlid, const char* rt, uint16_t nr) "cid %"PRIu16", act=0x%"PRIx16", ctrlid=%"PRIu16" %s nr=%"PRIu16""
pci_nvme_fdp_ruh_change(uint16_t rgid, uint16_t ruhid) "change RU on RUH rgid=%"PRIu16", ruhid=%"PRIu16""

# error conditions
pci_nvme_err_mdts(size_t len) "len %zu"
64 changes: 53 additions & 11 deletions hw/xen/xen_pt.c
@@ -57,6 +57,7 @@
#include <sys/ioctl.h>

#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/xen/xen.h"
@@ -780,15 +781,6 @@ static void xen_pt_realize(PCIDevice *d, Error **errp)
s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function,
s->dev.devfn);

xen_host_pci_device_get(&s->real_device,
s->hostaddr.domain, s->hostaddr.bus,
s->hostaddr.slot, s->hostaddr.function,
errp);
if (*errp) {
error_append_hint(errp, "Failed to \"open\" the real pci device");
return;
}

s->is_virtfn = s->real_device.is_virtfn;
if (s->is_virtfn) {
XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n",
@@ -803,8 +795,10 @@
s->io_listener = xen_pt_io_listener;

/* Setup VGA bios for passthrough GFX */
if ((s->real_device.domain == 0) && (s->real_device.bus == 0) &&
(s->real_device.dev == 2) && (s->real_device.func == 0)) {
if ((s->real_device.domain == XEN_PCI_IGD_DOMAIN) &&
(s->real_device.bus == XEN_PCI_IGD_BUS) &&
(s->real_device.dev == XEN_PCI_IGD_DEV) &&
(s->real_device.func == XEN_PCI_IGD_FN)) {
if (!is_igd_vga_passthrough(&s->real_device)) {
error_setg(errp, "Need to enable igd-passthru if you're trying"
" to passthrough IGD GFX");
@@ -950,11 +944,58 @@ static void xen_pci_passthrough_instance_init(Object *obj)
PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}

void xen_igd_reserve_slot(PCIBus *pci_bus)
{
if (!xen_igd_gfx_pt_enabled()) {
return;
}

XEN_PT_LOG(0, "Reserving PCI slot 2 for IGD\n");
pci_bus->slot_reserved_mask |= XEN_PCI_IGD_SLOT_MASK;
}

static void xen_igd_clear_slot(DeviceState *qdev, Error **errp)
{
ERRP_GUARD();
PCIDevice *pci_dev = (PCIDevice *)qdev;
XenPCIPassthroughState *s = XEN_PT_DEVICE(pci_dev);
XenPTDeviceClass *xpdc = XEN_PT_DEVICE_GET_CLASS(s);
PCIBus *pci_bus = pci_get_bus(pci_dev);

xen_host_pci_device_get(&s->real_device,
s->hostaddr.domain, s->hostaddr.bus,
s->hostaddr.slot, s->hostaddr.function,
errp);
if (*errp) {
error_append_hint(errp, "Failed to \"open\" the real pci device");
return;
}

if (!(pci_bus->slot_reserved_mask & XEN_PCI_IGD_SLOT_MASK)) {
xpdc->pci_qdev_realize(qdev, errp);
return;
}

if (is_igd_vga_passthrough(&s->real_device) &&
s->real_device.domain == XEN_PCI_IGD_DOMAIN &&
s->real_device.bus == XEN_PCI_IGD_BUS &&
s->real_device.dev == XEN_PCI_IGD_DEV &&
s->real_device.func == XEN_PCI_IGD_FN &&
s->real_device.vendor_id == PCI_VENDOR_ID_INTEL) {
pci_bus->slot_reserved_mask &= ~XEN_PCI_IGD_SLOT_MASK;
XEN_PT_LOG(pci_dev, "Intel IGD found, using slot 2\n");
}
xpdc->pci_qdev_realize(qdev, errp);
}

static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

XenPTDeviceClass *xpdc = XEN_PT_DEVICE_CLASS(klass);
xpdc->pci_qdev_realize = dc->realize;
dc->realize = xen_igd_clear_slot;
k->realize = xen_pt_realize;
k->exit = xen_pt_unregister_device;
k->config_read = xen_pt_pci_read_config;
@@ -977,6 +1018,7 @@ static const TypeInfo xen_pci_passthrough_info = {
.instance_size = sizeof(XenPCIPassthroughState),
.instance_finalize = xen_pci_passthrough_finalize,
.class_init = xen_pci_passthrough_class_init,
.class_size = sizeof(XenPTDeviceClass),
.instance_init = xen_pci_passthrough_instance_init,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
20 changes: 20 additions & 0 deletions hw/xen/xen_pt.h
@@ -40,7 +40,20 @@ typedef struct XenPTReg XenPTReg;
#define TYPE_XEN_PT_DEVICE "xen-pci-passthrough"
OBJECT_DECLARE_SIMPLE_TYPE(XenPCIPassthroughState, XEN_PT_DEVICE)

#define XEN_PT_DEVICE_CLASS(klass) \
OBJECT_CLASS_CHECK(XenPTDeviceClass, klass, TYPE_XEN_PT_DEVICE)
#define XEN_PT_DEVICE_GET_CLASS(obj) \
OBJECT_GET_CLASS(XenPTDeviceClass, obj, TYPE_XEN_PT_DEVICE)

typedef void (*XenPTQdevRealize)(DeviceState *qdev, Error **errp);

typedef struct XenPTDeviceClass {
PCIDeviceClass parent_class;
XenPTQdevRealize pci_qdev_realize;
} XenPTDeviceClass;

uint32_t igd_read_opregion(XenPCIPassthroughState *s);
void xen_igd_reserve_slot(PCIBus *pci_bus);
void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val);
void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
XenHostPCIDevice *dev);
@@ -75,6 +88,13 @@ typedef int (*xen_pt_conf_byte_read)

#define XEN_PCI_INTEL_OPREGION 0xfc

#define XEN_PCI_IGD_DOMAIN 0
#define XEN_PCI_IGD_BUS 0
#define XEN_PCI_IGD_DEV 2
#define XEN_PCI_IGD_FN 0
#define XEN_PCI_IGD_SLOT_MASK \
(1UL << PCI_SLOT(PCI_DEVFN(XEN_PCI_IGD_DEV, XEN_PCI_IGD_FN)))

typedef enum {
XEN_PT_GRP_TYPE_HARDWIRED = 0, /* 0 Hardwired reg group */
XEN_PT_GRP_TYPE_EMU, /* emul reg group */
2 changes: 1 addition & 1 deletion hw/xen/xen_pt_config_init.c
@@ -1924,7 +1924,7 @@ static void xen_pt_config_reg_init(XenPCIPassthroughState *s,
if (reg->init) {
uint32_t host_mask, size_mask;
unsigned int offset;
uint32_t val;
uint32_t val = 0;

/* initialize emulate register */
rc = reg->init(s, reg_entry->reg,
4 changes: 4 additions & 0 deletions hw/xen/xen_pt_stub.c
@@ -20,3 +20,7 @@ void xen_igd_gfx_pt_set(bool value, Error **errp)
error_setg(errp, "Xen PCI passthrough support not built in");
}
}

void xen_igd_reserve_slot(PCIBus *pci_bus)
{
}
236 changes: 223 additions & 13 deletions include/block/nvme.h
@@ -1,6 +1,8 @@
#ifndef BLOCK_NVME_H
#define BLOCK_NVME_H

#include "hw/registerfields.h"

typedef struct QEMU_PACKED NvmeBar {
uint64_t cap;
uint32_t vs;
@@ -58,6 +60,24 @@ enum NvmeBarRegs {
NVME_REG_PMRMSCU = offsetof(NvmeBar, pmrmscu),
};

typedef struct QEMU_PACKED NvmeEndGrpLog {
uint8_t critical_warning;
uint8_t rsvd[2];
uint8_t avail_spare;
uint8_t avail_spare_thres;
uint8_t percet_used;
uint8_t rsvd1[26];
uint64_t end_estimate[2];
uint64_t data_units_read[2];
uint64_t data_units_written[2];
uint64_t media_units_written[2];
uint64_t host_read_commands[2];
uint64_t host_write_commands[2];
uint64_t media_integrity_errors[2];
uint64_t no_err_info_log_entries[2];
uint8_t rsvd2[352];
} NvmeEndGrpLog;

enum NvmeCapShift {
CAP_MQES_SHIFT = 0,
CAP_CQR_SHIFT = 16,
@@ -595,7 +615,9 @@ enum NvmeAdminCommands {
NVME_ADM_CMD_ACTIVATE_FW = 0x10,
NVME_ADM_CMD_DOWNLOAD_FW = 0x11,
NVME_ADM_CMD_NS_ATTACHMENT = 0x15,
NVME_ADM_CMD_DIRECTIVE_SEND = 0x19,
NVME_ADM_CMD_VIRT_MNGMT = 0x1c,
NVME_ADM_CMD_DIRECTIVE_RECV = 0x1a,
NVME_ADM_CMD_DBBUF_CONFIG = 0x7c,
NVME_ADM_CMD_FORMAT_NVM = 0x80,
NVME_ADM_CMD_SECURITY_SEND = 0x81,
@@ -611,7 +633,9 @@ enum NvmeIoCommands {
NVME_CMD_WRITE_ZEROES = 0x08,
NVME_CMD_DSM = 0x09,
NVME_CMD_VERIFY = 0x0c,
NVME_CMD_IO_MGMT_RECV = 0x12,
NVME_CMD_COPY = 0x19,
NVME_CMD_IO_MGMT_SEND = 0x1d,
NVME_CMD_ZONE_MGMT_SEND = 0x79,
NVME_CMD_ZONE_MGMT_RECV = 0x7a,
NVME_CMD_ZONE_APPEND = 0x7d,
@@ -704,7 +728,9 @@ typedef struct QEMU_PACKED NvmeRwCmd {
uint64_t slba;
uint16_t nlb;
uint16_t control;
uint32_t dsmgmt;
uint8_t dsmgmt;
uint8_t rsvd;
uint16_t dspec;
uint32_t reftag;
uint16_t apptag;
uint16_t appmask;
@@ -875,6 +901,8 @@ enum NvmeStatusCodes {
NVME_INVALID_PRP_OFFSET = 0x0013,
NVME_CMD_SET_CMB_REJECTED = 0x002b,
NVME_INVALID_CMD_SET = 0x002c,
NVME_FDP_DISABLED = 0x0029,
NVME_INVALID_PHID_LIST = 0x002a,
NVME_LBA_RANGE = 0x0080,
NVME_CAP_EXCEEDED = 0x0081,
NVME_NS_NOT_READY = 0x0082,
@@ -1005,11 +1033,16 @@ enum {
};

enum NvmeLogIdentifier {
NVME_LOG_ERROR_INFO = 0x01,
NVME_LOG_SMART_INFO = 0x02,
NVME_LOG_FW_SLOT_INFO = 0x03,
NVME_LOG_CHANGED_NSLIST = 0x04,
NVME_LOG_CMD_EFFECTS = 0x05,
NVME_LOG_ENDGRP = 0x09,
NVME_LOG_FDP_CONFS = 0x20,
NVME_LOG_FDP_RUH_USAGE = 0x21,
NVME_LOG_FDP_STATS = 0x22,
NVME_LOG_FDP_EVENTS = 0x23,
};

typedef struct QEMU_PACKED NvmePSD {
@@ -1091,7 +1124,10 @@ typedef struct QEMU_PACKED NvmeIdCtrl {
uint16_t mntmt;
uint16_t mxtmt;
uint32_t sanicap;
uint8_t rsvd332[180];
uint8_t rsvd332[6];
uint16_t nsetidmax;
uint16_t endgidmax;
uint8_t rsvd342[170];
uint8_t sqes;
uint8_t cqes;
uint16_t maxcmd;
@@ -1134,15 +1170,18 @@ enum NvmeIdCtrlOaes {
};

enum NvmeIdCtrlCtratt {
NVME_CTRATT_ENDGRPS = 1 << 4,
NVME_CTRATT_ELBAS = 1 << 15,
NVME_CTRATT_FDPS = 1 << 19,
};

enum NvmeIdCtrlOacs {
NVME_OACS_SECURITY = 1 << 0,
NVME_OACS_FORMAT = 1 << 1,
NVME_OACS_FW = 1 << 2,
NVME_OACS_NS_MGMT = 1 << 3,
NVME_OACS_DIRECTIVES = 1 << 5,
NVME_OACS_DBBUF = 1 << 8,
};

enum NvmeIdCtrlOncs {
@@ -1227,6 +1266,7 @@ enum NvmeNsAttachmentOperation {
#define NVME_AEC_SMART(aec) (aec & 0xff)
#define NVME_AEC_NS_ATTR(aec) ((aec >> 8) & 0x1)
#define NVME_AEC_FW_ACTIVATION(aec) ((aec >> 9) & 0x1)
#define NVME_AEC_ENDGRP_NOTICE(aec) ((aec >> 14) & 0x1)

#define NVME_ERR_REC_TLER(err_rec) (err_rec & 0xffff)
#define NVME_ERR_REC_DULBE(err_rec) (err_rec & 0x10000)
@@ -1246,6 +1286,8 @@ enum NvmeFeatureIds {
NVME_TIMESTAMP = 0xe,
NVME_HOST_BEHAVIOR_SUPPORT = 0x16,
NVME_COMMAND_SET_PROFILE = 0x19,
NVME_FDP_MODE = 0x1d,
NVME_FDP_EVENTS = 0x1e,
NVME_SOFTWARE_PROGRESS_MARKER = 0x80,
NVME_FID_MAX = 0x100,
};
@@ -1338,7 +1380,10 @@ typedef struct QEMU_PACKED NvmeIdNs {
uint16_t mssrl;
uint32_t mcl;
uint8_t msrc;
uint8_t rsvd81[23];
uint8_t rsvd81[18];
uint8_t nsattr;
uint16_t nvmsetid;
uint16_t endgid;
uint8_t nguid[16];
uint64_t eui64;
NvmeLBAF lbaf[NVME_MAX_NLBAF];
@@ -1617,6 +1662,169 @@ typedef enum NvmeVirtualResourceType {
NVME_VIRT_RES_INTERRUPT = 0x01,
} NvmeVirtualResourceType;

typedef struct NvmeDirectiveIdentify {
uint8_t supported;
uint8_t unused1[31];
uint8_t enabled;
uint8_t unused33[31];
uint8_t persistent;
uint8_t unused65[31];
uint8_t rsvd64[4000];
} NvmeDirectiveIdentify;

enum NvmeDirectiveTypes {
NVME_DIRECTIVE_IDENTIFY = 0x0,
NVME_DIRECTIVE_DATA_PLACEMENT = 0x2,
};

enum NvmeDirectiveOperations {
NVME_DIRECTIVE_RETURN_PARAMS = 0x1,
};

typedef struct QEMU_PACKED NvmeFdpConfsHdr {
uint16_t num_confs;
uint8_t version;
uint8_t rsvd3;
uint32_t size;
uint8_t rsvd8[8];
} NvmeFdpConfsHdr;

REG8(FDPA, 0x0)
FIELD(FDPA, RGIF, 0, 4)
FIELD(FDPA, VWC, 4, 1)
FIELD(FDPA, VALID, 7, 1);
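
The REG8/FIELD declarations expand into R_FDPA_* shift/mask constants. Assuming the FIELD_DP8 helper from hw/registerfields.h, a controller could compose the FDPA byte of the configuration descriptor along these lines (a sketch, not the ctrl.c code, which is not rendered above):

uint8_t fdpa = 0;

fdpa = FIELD_DP8(fdpa, FDPA, RGIF, endgrp->fdp.rgif);
fdpa = FIELD_DP8(fdpa, FDPA, VALID, 1);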

typedef struct QEMU_PACKED NvmeFdpDescrHdr {
uint16_t descr_size;
uint8_t fdpa;
uint8_t vss;
uint32_t nrg;
uint16_t nruh;
uint16_t maxpids;
uint32_t nnss;
uint64_t runs;
uint32_t erutl;
uint8_t rsvd28[36];
} NvmeFdpDescrHdr;

enum NvmeRuhType {
NVME_RUHT_INITIALLY_ISOLATED = 1,
NVME_RUHT_PERSISTENTLY_ISOLATED = 2,
};

typedef struct QEMU_PACKED NvmeRuhDescr {
uint8_t ruht;
uint8_t rsvd1[3];
} NvmeRuhDescr;

typedef struct QEMU_PACKED NvmeRuhuLog {
uint16_t nruh;
uint8_t rsvd2[6];
} NvmeRuhuLog;

enum NvmeRuhAttributes {
NVME_RUHA_UNUSED = 0,
NVME_RUHA_HOST = 1,
NVME_RUHA_CTRL = 2,
};

typedef struct QEMU_PACKED NvmeRuhuDescr {
uint8_t ruha;
uint8_t rsvd1[7];
} NvmeRuhuDescr;

typedef struct QEMU_PACKED NvmeFdpStatsLog {
uint64_t hbmw[2];
uint64_t mbmw[2];
uint64_t mbe[2];
uint8_t rsvd48[16];
} NvmeFdpStatsLog;

typedef struct QEMU_PACKED NvmeFdpEventsLog {
uint32_t num_events;
uint8_t rsvd4[60];
} NvmeFdpEventsLog;

enum NvmeFdpEventType {
FDP_EVT_RU_NOT_FULLY_WRITTEN = 0x0,
FDP_EVT_RU_ATL_EXCEEDED = 0x1,
FDP_EVT_CTRL_RESET_RUH = 0x2,
FDP_EVT_INVALID_PID = 0x3,
FDP_EVT_MEDIA_REALLOC = 0x80,
FDP_EVT_RUH_IMPLICIT_RU_CHANGE = 0x81,
};

enum NvmeFdpEventFlags {
FDPEF_PIV = 1 << 0,
FDPEF_NSIDV = 1 << 1,
FDPEF_LV = 1 << 2,
};

typedef struct QEMU_PACKED NvmeFdpEvent {
uint8_t type;
uint8_t flags;
uint16_t pid;
uint64_t timestamp;
uint32_t nsid;
uint64_t type_specific[2];
uint16_t rgid;
uint8_t ruhid;
uint8_t rsvd35[5];
uint64_t vendor[3];
} NvmeFdpEvent;

typedef struct QEMU_PACKED NvmePhidList {
uint16_t nnruhd;
uint8_t rsvd2[6];
} NvmePhidList;

typedef struct QEMU_PACKED NvmePhidDescr {
uint8_t ruht;
uint8_t rsvd1;
uint16_t ruhid;
} NvmePhidDescr;

REG32(FEAT_FDP, 0x0)
FIELD(FEAT_FDP, FDPE, 0, 1)
FIELD(FEAT_FDP, CONF_NDX, 8, 8);

typedef struct QEMU_PACKED NvmeFdpEventDescr {
uint8_t evt;
uint8_t evta;
} NvmeFdpEventDescr;

REG32(NVME_IOMR, 0x0)
FIELD(NVME_IOMR, MO, 0, 8)
FIELD(NVME_IOMR, MOS, 16, 16);

enum NvmeIomr2Mo {
NVME_IOMR_MO_NOP = 0x0,
NVME_IOMR_MO_RUH_STATUS = 0x1,
NVME_IOMR_MO_VENDOR_SPECIFIC = 0x255,
};
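
A sketch of decoding an I/O Management Receive command with these fields; the cmd variable and the surrounding request plumbing are assumed, the real handler being in ctrl.c:

uint32_t cdw10 = le32_to_cpu(cmd->cdw10);
uint8_t mo = FIELD_EX32(cdw10, NVME_IOMR, MO);
uint16_t mos = FIELD_EX32(cdw10, NVME_IOMR, MOS);

if (mo == NVME_IOMR_MO_RUH_STATUS) {
    /* return an NvmeRuhStatus header plus one descriptor per handle */
}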

typedef struct QEMU_PACKED NvmeRuhStatus {
uint8_t rsvd0[14];
uint16_t nruhsd;
} NvmeRuhStatus;

typedef struct QEMU_PACKED NvmeRuhStatusDescr {
uint16_t pid;
uint16_t ruhid;
uint32_t earutr;
uint64_t ruamw;
uint8_t rsvd16[16];
} NvmeRuhStatusDescr;

REG32(NVME_IOMS, 0x0)
FIELD(NVME_IOMS, MO, 0, 8)
FIELD(NVME_IOMS, MOS, 16, 16);

enum NvmeIoms2Mo {
NVME_IOMS_MO_NOP = 0x0,
NVME_IOMS_MO_RUH_UPDATE = 0x1,
};

static inline void _nvme_check_size(void)
{
QEMU_BUILD_BUG_ON(sizeof(NvmeBar) != 4096);
@@ -1655,5 +1863,7 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmePriCtrlCap) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlEntry) != 32);
QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlList) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeEndGrpLog) != 512);
QEMU_BUILD_BUG_ON(sizeof(NvmeDirectiveIdentify) != 4096);
}
#endif
6 changes: 6 additions & 0 deletions include/hw/arm/allwinner-h3.h
@@ -84,6 +84,8 @@ enum {
AW_H3_DEV_UART3,
AW_H3_DEV_EMAC,
AW_H3_DEV_TWI0,
AW_H3_DEV_TWI1,
AW_H3_DEV_TWI2,
AW_H3_DEV_DRAMCOM,
AW_H3_DEV_DRAMCTL,
AW_H3_DEV_DRAMPHY,
@@ -93,6 +95,7 @@
AW_H3_DEV_GIC_VCPU,
AW_H3_DEV_RTC,
AW_H3_DEV_CPUCFG,
AW_H3_DEV_R_TWI,
AW_H3_DEV_SDRAM
};

@@ -133,6 +136,9 @@ struct AwH3State {
AwSidState sid;
AwSdHostState mmc0;
AWI2CState i2c0;
AWI2CState i2c1;
AWI2CState i2c2;
AWI2CState r_twi;
AwSun8iEmacState emac;
AwRtcState rtc;
GICState gic;
6 changes: 6 additions & 0 deletions include/hw/i2c/allwinner-i2c.h
@@ -28,6 +28,10 @@
#include "qom/object.h"

#define TYPE_AW_I2C "allwinner.i2c"

/** Allwinner I2C sun6i family and newer (A31, H2+, H3, etc) */
#define TYPE_AW_I2C_SUN6I TYPE_AW_I2C "-sun6i"

OBJECT_DECLARE_SIMPLE_TYPE(AWI2CState, AW_I2C)

#define AW_I2C_MEM_SIZE 0x24
@@ -50,6 +54,8 @@ struct AWI2CState {
uint8_t srst;
uint8_t efr;
uint8_t lcr;

bool irq_clear_inverted;
};

#endif /* ALLWINNER_I2C_H */
19 changes: 19 additions & 0 deletions include/hw/loader.h
@@ -86,6 +86,25 @@ ssize_t load_image_gzipped_buffer(const char *filename, uint64_t max_sz,
uint8_t **buffer);
ssize_t load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz);

/**
* unpack_efi_zboot_image:
* @buffer: pointer to a variable holding the address of a buffer containing the
* image
* @size: pointer to a variable holding the size of the buffer
*
* Check whether the buffer contains an EFI zboot image, and if it does, extract
* the compressed payload and decompress it into a new buffer. If successful,
* the old buffer is freed, and the *buffer and size variables pointed to by the
* function arguments are updated to refer to the newly populated buffer.
*
* Returns 0 if the image could not be identified as an EFI zboot image.
* Returns -1 if the buffer contents were identified as an EFI zboot image, but
* unpacking failed for any reason.
* Returns the size of the decompressed payload if decompression was performed
* successfully.
*/
ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size);
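
An illustrative caller following the contract above (the surrounding error handling is a sketch):

ssize_t ret = unpack_efi_zboot_image(&buffer, &size);
if (ret == -1) {
    /* recognized as an EFI zboot image, but unpacking failed */
    error_report("unpacking EFI zboot image failed");
} else if (ret > 0) {
    /* buffer and size now describe the decompressed payload */
    assert(ret == size);
}
/* ret == 0: not a zboot image; buffer and size are untouched */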

#define ELF_LOAD_FAILED -1
#define ELF_LOAD_NOT_ELF -2
#define ELF_LOAD_WRONG_ARCH -3
17 changes: 9 additions & 8 deletions target/arm/cpu.h
@@ -869,6 +869,8 @@ struct ArchCPU {

DynamicGDBXMLInfo dyn_sysreg_xml;
DynamicGDBXMLInfo dyn_svereg_xml;
DynamicGDBXMLInfo dyn_m_systemreg_xml;
DynamicGDBXMLInfo dyn_m_secextreg_xml;

/* Timers used by the generic (architected) timer */
QEMUTimer *gt_timer[NUM_GTIMERS];
@@ -1112,13 +1114,6 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
int arm_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

/*
* Helpers to dynamically generate XML descriptions of the sysregs
* and SVE registers. Returns the number of registers in each set.
*/
int arm_gen_dynamic_sysreg_xml(CPUState *cpu, int base_reg);
int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);

/* Returns the dynamically generated XML for the gdb stub.
* Returns a pointer to the XML contents for the specified XML file or NULL
* if the XML name doesn't match the predefined one.
@@ -2389,14 +2384,16 @@ static inline int arm_feature(CPUARMState *env, int feature)
void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);

#if !defined(CONFIG_USER_ONLY)
/* Return true if exception levels below EL3 are in secure state,
/*
* Return true if exception levels below EL3 are in secure state,
* or would be following an exception return to that level.
* Unlike arm_is_secure() (which is always a question about the
* _current_ state of the CPU) this doesn't care about the current
* EL or mode.
*/
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
assert(!arm_feature(env, ARM_FEATURE_M));
if (arm_feature(env, ARM_FEATURE_EL3)) {
return !(env->cp15.scr_el3 & SCR_NS);
} else {
@@ -2410,6 +2407,7 @@ static inline bool arm_is_secure_below_el3(CPUARMState *env)
/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
static inline bool arm_is_el3_or_mon(CPUARMState *env)
{
assert(!arm_feature(env, ARM_FEATURE_M));
if (arm_feature(env, ARM_FEATURE_EL3)) {
if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
/* CPU currently in AArch64 state and EL3 */
@@ -2426,6 +2424,9 @@ static inline bool arm_is_el3_or_mon(CPUARMState *env)
/* Return true if the processor is in secure state */
static inline bool arm_is_secure(CPUARMState *env)
{
if (arm_feature(env, ARM_FEATURE_M)) {
return env->v7m.secure;
}
if (arm_is_el3_or_mon(env)) {
return true;
}
272 changes: 168 additions & 104 deletions target/arm/gdbstub.c
@@ -305,7 +305,7 @@ static void arm_register_sysreg_for_xml(gpointer key, gpointer value,
}
}

int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg)
static int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg)
{
ARMCPU *cpu = ARM_CPU(cs);
GString *s = g_string_new(NULL);
@@ -322,125 +322,163 @@ int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg)
return cpu->dyn_sysreg_xml.num;
}

struct TypeSize {
const char *gdb_type;
int size;
const char sz, suffix;
typedef enum {
M_SYSREG_MSP,
M_SYSREG_PSP,
M_SYSREG_PRIMASK,
M_SYSREG_CONTROL,
M_SYSREG_BASEPRI,
M_SYSREG_FAULTMASK,
M_SYSREG_MSPLIM,
M_SYSREG_PSPLIM,
} MProfileSysreg;

static const struct {
const char *name;
int feature;
} m_sysreg_def[] = {
[M_SYSREG_MSP] = { "msp", ARM_FEATURE_M },
[M_SYSREG_PSP] = { "psp", ARM_FEATURE_M },
[M_SYSREG_PRIMASK] = { "primask", ARM_FEATURE_M },
[M_SYSREG_CONTROL] = { "control", ARM_FEATURE_M },
[M_SYSREG_BASEPRI] = { "basepri", ARM_FEATURE_M_MAIN },
[M_SYSREG_FAULTMASK] = { "faultmask", ARM_FEATURE_M_MAIN },
[M_SYSREG_MSPLIM] = { "msplim", ARM_FEATURE_V8 },
[M_SYSREG_PSPLIM] = { "psplim", ARM_FEATURE_V8 },
};

static const struct TypeSize vec_lanes[] = {
/* quads */
{ "uint128", 128, 'q', 'u' },
{ "int128", 128, 'q', 's' },
/* 64 bit */
{ "ieee_double", 64, 'd', 'f' },
{ "uint64", 64, 'd', 'u' },
{ "int64", 64, 'd', 's' },
/* 32 bit */
{ "ieee_single", 32, 's', 'f' },
{ "uint32", 32, 's', 'u' },
{ "int32", 32, 's', 's' },
/* 16 bit */
{ "ieee_half", 16, 'h', 'f' },
{ "uint16", 16, 'h', 'u' },
{ "int16", 16, 'h', 's' },
/* bytes */
{ "uint8", 8, 'b', 'u' },
{ "int8", 8, 'b', 's' },
};
static uint32_t *m_sysreg_ptr(CPUARMState *env, MProfileSysreg reg, bool sec)
{
uint32_t *ptr;

switch (reg) {
case M_SYSREG_MSP:
ptr = arm_v7m_get_sp_ptr(env, sec, false, true);
break;
case M_SYSREG_PSP:
ptr = arm_v7m_get_sp_ptr(env, sec, true, true);
break;
case M_SYSREG_MSPLIM:
ptr = &env->v7m.msplim[sec];
break;
case M_SYSREG_PSPLIM:
ptr = &env->v7m.psplim[sec];
break;
case M_SYSREG_PRIMASK:
ptr = &env->v7m.primask[sec];
break;
case M_SYSREG_BASEPRI:
ptr = &env->v7m.basepri[sec];
break;
case M_SYSREG_FAULTMASK:
ptr = &env->v7m.faultmask[sec];
break;
case M_SYSREG_CONTROL:
ptr = &env->v7m.control[sec];
break;
default:
return NULL;
}
return arm_feature(env, m_sysreg_def[reg].feature) ? ptr : NULL;
}

static int m_sysreg_get(CPUARMState *env, GByteArray *buf,
MProfileSysreg reg, bool secure)
{
uint32_t *ptr = m_sysreg_ptr(env, reg, secure);

if (ptr == NULL) {
return 0;
}
return gdb_get_reg32(buf, *ptr);
}

static int arm_gdb_get_m_systemreg(CPUARMState *env, GByteArray *buf, int reg)
{
/*
* Here, we emulate the MRS instruction, where CONTROL has a mix of
* banked and non-banked bits.
*/
if (reg == M_SYSREG_CONTROL) {
return gdb_get_reg32(buf, arm_v7m_mrs_control(env, env->v7m.secure));
}
return m_sysreg_get(env, buf, reg, env->v7m.secure);
}

static int arm_gdb_set_m_systemreg(CPUARMState *env, uint8_t *buf, int reg)
{
return 0; /* TODO */
}

int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg)
static int arm_gen_dynamic_m_systemreg_xml(CPUState *cs, int orig_base_reg)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
GString *s = g_string_new(NULL);
DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
g_autoptr(GString) ts = g_string_new("");
int i, j, bits, reg_width = (cpu->sve_max_vq * 128);
info->num = 0;
int base_reg = orig_base_reg;
int i;

g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.arm.m-system\">\n");

/* First define types and totals in a whole VL */
for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
int count = reg_width / vec_lanes[i].size;
g_string_printf(ts, "svev%c%c", vec_lanes[i].sz, vec_lanes[i].suffix);
g_string_append_printf(s,
"<vector id=\"%s\" type=\"%s\" count=\"%d\"/>",
ts->str, vec_lanes[i].gdb_type, count);
}
/*
* Now define a union for each size group containing unsigned and
* signed and potentially float versions of each size from 128 to
* 8 bits.
*/
for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
const char suf[] = { 'q', 'd', 's', 'h', 'b' };
g_string_append_printf(s, "<union id=\"svevn%c\">", suf[i]);
for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
if (vec_lanes[j].size == bits) {
g_string_append_printf(s, "<field name=\"%c\" type=\"svev%c%c\"/>",
vec_lanes[j].suffix,
vec_lanes[j].sz, vec_lanes[j].suffix);
}
for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) {
if (arm_feature(env, m_sysreg_def[i].feature)) {
g_string_append_printf(s,
"<reg name=\"%s\" bitsize=\"32\" regnum=\"%d\"/>\n",
m_sysreg_def[i].name, base_reg++);
}
g_string_append(s, "</union>");
}
/* And now the final union of unions */
g_string_append(s, "<union id=\"svev\">");
for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
const char suf[] = { 'q', 'd', 's', 'h', 'b' };
g_string_append_printf(s, "<field name=\"%c\" type=\"svevn%c\"/>",
suf[i], suf[i]);
}
g_string_append(s, "</union>");

/* Finally the sve prefix type */
g_string_append_printf(s,
"<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
reg_width / 8);
g_string_append_printf(s, "</feature>");
cpu->dyn_m_systemreg_xml.desc = g_string_free(s, false);
cpu->dyn_m_systemreg_xml.num = base_reg - orig_base_reg;

return cpu->dyn_m_systemreg_xml.num;
}

#ifndef CONFIG_USER_ONLY
/*
* For user-only, we see the non-secure registers via m_systemreg above.
* For secext, encode the non-secure view as even and secure view as odd.
*/
static int arm_gdb_get_m_secextreg(CPUARMState *env, GByteArray *buf, int reg)
{
return m_sysreg_get(env, buf, reg >> 1, reg & 1);
}

static int arm_gdb_set_m_secextreg(CPUARMState *env, uint8_t *buf, int reg)
{
return 0; /* TODO */
}

static int arm_gen_dynamic_m_secextreg_xml(CPUState *cs, int orig_base_reg)
{
ARMCPU *cpu = ARM_CPU(cs);
GString *s = g_string_new(NULL);
int base_reg = orig_base_reg;
int i;

g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.arm.secext\">\n");

/* Then define each register in parts for each vq */
for (i = 0; i < 32; i++) {
for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) {
g_string_append_printf(s,
"<reg name=\"z%d\" bitsize=\"%d\""
" regnum=\"%d\" type=\"svev\"/>",
i, reg_width, base_reg++);
info->num++;
}
/* fpscr & status registers */
g_string_append_printf(s, "<reg name=\"fpsr\" bitsize=\"32\""
" regnum=\"%d\" group=\"float\""
" type=\"int\"/>", base_reg++);
g_string_append_printf(s, "<reg name=\"fpcr\" bitsize=\"32\""
" regnum=\"%d\" group=\"float\""
" type=\"int\"/>", base_reg++);
info->num += 2;

for (i = 0; i < 16; i++) {
"<reg name=\"%s_ns\" bitsize=\"32\" regnum=\"%d\"/>\n",
m_sysreg_def[i].name, base_reg++);
g_string_append_printf(s,
"<reg name=\"p%d\" bitsize=\"%d\""
" regnum=\"%d\" type=\"svep\"/>",
i, cpu->sve_max_vq * 16, base_reg++);
info->num++;
"<reg name=\"%s_s\" bitsize=\"32\" regnum=\"%d\"/>\n",
m_sysreg_def[i].name, base_reg++);
}
g_string_append_printf(s,
"<reg name=\"ffr\" bitsize=\"%d\""
" regnum=\"%d\" group=\"vector\""
" type=\"svep\"/>",
cpu->sve_max_vq * 16, base_reg++);
g_string_append_printf(s,
"<reg name=\"vg\" bitsize=\"64\""
" regnum=\"%d\" type=\"int\"/>",
base_reg++);
info->num += 2;

g_string_append_printf(s, "</feature>");
cpu->dyn_svereg_xml.desc = g_string_free(s, false);
cpu->dyn_m_secextreg_xml.desc = g_string_free(s, false);
cpu->dyn_m_secextreg_xml.num = base_reg - orig_base_reg;

return cpu->dyn_svereg_xml.num;
return cpu->dyn_m_secextreg_xml.num;
}

#endif

const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
@@ -450,6 +488,12 @@ const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
return cpu->dyn_sysreg_xml.desc;
} else if (strcmp(xmlname, "sve-registers.xml") == 0) {
return cpu->dyn_svereg_xml.desc;
} else if (strcmp(xmlname, "arm-m-system.xml") == 0) {
return cpu->dyn_m_systemreg_xml.desc;
#ifndef CONFIG_USER_ONLY
} else if (strcmp(xmlname, "arm-m-secext.xml") == 0) {
return cpu->dyn_m_secextreg_xml.desc;
#endif
}
return NULL;
}
@@ -466,14 +510,20 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
*/
#ifdef TARGET_AARCH64
if (isar_feature_aa64_sve(&cpu->isar)) {
gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg,
arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs),
int nreg = arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs);
gdb_register_coprocessor(cs, aarch64_gdb_get_sve_reg,
aarch64_gdb_set_sve_reg, nreg,
"sve-registers.xml", 0);
} else {
gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
aarch64_fpu_gdb_set_reg,
gdb_register_coprocessor(cs, aarch64_gdb_get_fpu_reg,
aarch64_gdb_set_fpu_reg,
34, "aarch64-fpu.xml", 0);
}
if (isar_feature_aa64_pauth(&cpu->isar)) {
gdb_register_coprocessor(cs, aarch64_gdb_get_pauth_reg,
aarch64_gdb_set_pauth_reg,
4, "aarch64-pauth.xml", 0);
}
#endif
} else {
if (arm_feature(env, ARM_FEATURE_NEON)) {
Expand Down Expand Up @@ -503,4 +553,18 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
"system-registers.xml", 0);

if (arm_feature(env, ARM_FEATURE_M)) {
gdb_register_coprocessor(cs,
arm_gdb_get_m_systemreg, arm_gdb_set_m_systemreg,
arm_gen_dynamic_m_systemreg_xml(cs, cs->gdb_num_regs),
"arm-m-system.xml", 0);
#ifndef CONFIG_USER_ONLY
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
gdb_register_coprocessor(cs,
arm_gdb_get_m_secextreg, arm_gdb_set_m_secextreg,
arm_gen_dynamic_m_secextreg_xml(cs, cs->gdb_num_regs),
"arm-m-secext.xml", 0);
}
#endif
}
}
175 changes: 171 additions & 4 deletions target/arm/gdbstub64.c
@@ -72,7 +72,7 @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 0;
}

int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg)
{
switch (reg) {
case 0 ... 31:
@@ -92,7 +92,7 @@ int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
}
}

int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg)
{
switch (reg) {
case 0 ... 31:
@@ -116,7 +116,7 @@ int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
}
}

int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg)
{
ARMCPU *cpu = env_archcpu(env);

@@ -164,7 +164,7 @@ int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
return 0;
}

int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg)
{
ARMCPU *cpu = env_archcpu(env);

@@ -209,3 +209,170 @@ int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)

return 0;
}

int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg)
{
switch (reg) {
case 0: /* pauth_dmask */
case 1: /* pauth_cmask */
case 2: /* pauth_dmask_high */
case 3: /* pauth_cmask_high */
/*
* Note that older versions of this feature only contained
* pauth_{d,c}mask, for use with Linux user processes, and
* thus exclusively in the low half of the address space.
*
* To support system mode, and to debug kernels, two new regs
* were added to cover the high half of the address space.
* For the purpose of pauth_ptr_mask, we can use any well-formed
* address within the address space half -- here, 0 and -1.
*/
{
bool is_data = !(reg & 1);
bool is_high = reg & 2;
uint64_t mask = pauth_ptr_mask(env, -is_high, is_data);
return gdb_get_reg64(buf, mask);
}
default:
return 0;
}
}

int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg)
{
/* All pseudo registers are read-only. */
return 0;
}

static void output_vector_union_type(GString *s, int reg_width,
const char *name)
{
struct TypeSize {
const char *gdb_type;
short size;
char sz, suffix;
};

static const struct TypeSize vec_lanes[] = {
/* quads */
{ "uint128", 128, 'q', 'u' },
{ "int128", 128, 'q', 's' },
/* 64 bit */
{ "ieee_double", 64, 'd', 'f' },
{ "uint64", 64, 'd', 'u' },
{ "int64", 64, 'd', 's' },
/* 32 bit */
{ "ieee_single", 32, 's', 'f' },
{ "uint32", 32, 's', 'u' },
{ "int32", 32, 's', 's' },
/* 16 bit */
{ "ieee_half", 16, 'h', 'f' },
{ "uint16", 16, 'h', 'u' },
{ "int16", 16, 'h', 's' },
/* bytes */
{ "uint8", 8, 'b', 'u' },
{ "int8", 8, 'b', 's' },
};

static const char suf[] = { 'b', 'h', 's', 'd', 'q' };
int i, j;

/* First define types and totals in a whole VL */
for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
g_string_append_printf(s,
"<vector id=\"%s%c%c\" type=\"%s\" count=\"%d\"/>",
name, vec_lanes[i].sz, vec_lanes[i].suffix,
vec_lanes[i].gdb_type, reg_width / vec_lanes[i].size);
}

/*
* Now define a union for each size group containing unsigned and
* signed and potentially float versions of each size from 128 to
* 8 bits.
*/
for (i = 0; i < ARRAY_SIZE(suf); i++) {
int bits = 8 << i;

g_string_append_printf(s, "<union id=\"%sn%c\">", name, suf[i]);
for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
if (vec_lanes[j].size == bits) {
g_string_append_printf(s, "<field name=\"%c\" type=\"%s%c%c\"/>",
vec_lanes[j].suffix, name,
vec_lanes[j].sz, vec_lanes[j].suffix);
}
}
g_string_append(s, "</union>");
}

/* And now the final union of unions */
g_string_append_printf(s, "<union id=\"%s\">", name);
for (i = ARRAY_SIZE(suf) - 1; i >= 0; i--) {
g_string_append_printf(s, "<field name=\"%c\" type=\"%sn%c\"/>",
suf[i], name, suf[i]);
}
g_string_append(s, "</union>");
}

int arm_gen_dynamic_svereg_xml(CPUState *cs, int orig_base_reg)
{
ARMCPU *cpu = ARM_CPU(cs);
GString *s = g_string_new(NULL);
DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
int reg_width = cpu->sve_max_vq * 128;
int pred_width = cpu->sve_max_vq * 16;
int base_reg = orig_base_reg;
int i;

g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");

/* Create the vector union type. */
output_vector_union_type(s, reg_width, "svev");

/* Create the predicate vector type. */
g_string_append_printf(s,
"<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
pred_width / 8);

/* Define the vector registers. */
for (i = 0; i < 32; i++) {
g_string_append_printf(s,
"<reg name=\"z%d\" bitsize=\"%d\""
" regnum=\"%d\" type=\"svev\"/>",
i, reg_width, base_reg++);
}

/* fpscr & status registers */
g_string_append_printf(s, "<reg name=\"fpsr\" bitsize=\"32\""
" regnum=\"%d\" group=\"float\""
" type=\"int\"/>", base_reg++);
g_string_append_printf(s, "<reg name=\"fpcr\" bitsize=\"32\""
" regnum=\"%d\" group=\"float\""
" type=\"int\"/>", base_reg++);

/* Define the predicate registers. */
for (i = 0; i < 16; i++) {
g_string_append_printf(s,
"<reg name=\"p%d\" bitsize=\"%d\""
" regnum=\"%d\" type=\"svep\"/>",
i, pred_width, base_reg++);
}
g_string_append_printf(s,
"<reg name=\"ffr\" bitsize=\"%d\""
" regnum=\"%d\" group=\"vector\""
" type=\"svep\"/>",
pred_width, base_reg++);

/* Define the vector length pseudo-register. */
g_string_append_printf(s,
"<reg name=\"vg\" bitsize=\"64\""
" regnum=\"%d\" type=\"int\"/>",
base_reg++);

g_string_append_printf(s, "</feature>");

info->desc = g_string_free(s, false);
info->num = base_reg - orig_base_reg;
return info->num;
}
3 changes: 3 additions & 0 deletions target/arm/helper.c
@@ -5787,6 +5787,9 @@ uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure)

uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
if (arm_feature(env, ARM_FEATURE_M)) {
return 0;
}
return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env));
}

34 changes: 30 additions & 4 deletions target/arm/internals.h
@@ -1344,16 +1344,32 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env)
}

#ifdef TARGET_AARCH64
int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg);
int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
* Return a pointer to the location where we currently store the
* stack pointer for the requested security state and thread mode.
* This pointer will become invalid if the CPU state is updated
* such that the stack pointers are switched around (eg changing
* the SPSEL control bit).
*/
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
bool threadmode, bool spsel);

#ifdef CONFIG_USER_ONLY
static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
#else
@@ -1367,6 +1383,16 @@ int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
* pauth_ptr_mask:
* @env: cpu context
* @ptr: selects between TTBR0 and TTBR1
* @data: selects between TBI and TBID
*
* Return a mask of the bits of @ptr that contain the authentication code.
*/
uint64_t pauth_ptr_mask(CPUARMState *env, uint64_t ptr, bool data);

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

173 changes: 97 additions & 76 deletions target/arm/ptw.c
@@ -1081,70 +1081,119 @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
* check_s2_mmu_setup
* @cpu: ARMCPU
* @is_aa64: True if the translation regime is in AArch64 state
* @startlevel: Suggested starting level
* @inputsize: Bitsize of IPAs
* @tcr: VTCR_EL2 or VSTCR_EL2
* @ds: Effective value of TCR.DS.
* @iasize: Bitsize of IPAs
* @stride: Page-table stride (See the ARM ARM)
*
* Returns true if the suggested S2 translation parameters are OK and
* false otherwise.
* Decode the starting level of the S2 lookup, returning INT_MIN if
* the configuration is invalid.
*/
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
int inputsize, int stride, int outputsize)
static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
bool ds, int iasize, int stride)
{
const int grainsize = stride + 3;
int startsizecheck;

/*
* Negative levels are usually not allowed...
* Except for FEAT_LPA2, 4k page table, 52-bit address space, which
* begins with level -1. Note that previous feature tests will have
* eliminated this combination if it is not enabled.
*/
if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
return false;
}

startsizecheck = inputsize - ((3 - level) * stride + grainsize);
if (startsizecheck < 1 || startsizecheck > stride + 4) {
return false;
}
int sl0, sl2, startlevel, granulebits, levels;
int s1_min_iasize, s1_max_iasize;

sl0 = extract32(tcr, 6, 2);
if (is_aa64) {
/*
* AArch64.S2InvalidTxSZ: While we checked tsz_oob near the top of
* get_phys_addr_lpae, that used aa64_va_parameters which apply
* to aarch64. If Stage1 is aarch32, the min_txsz is larger.
* See AArch64.S2MinTxSZ, where min_tsz is 24, translated to
* inputsize is 64 - 24 = 40.
*/
if (iasize < 40 && !arm_el_is_aa64(&cpu->env, 1)) {
goto fail;
}

/*
* AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
* so interleave AArch64.S2StartLevel.
*/
switch (stride) {
case 13: /* 64KB Pages. */
if (level == 0 || (level == 1 && outputsize <= 42)) {
return false;
case 9: /* 4KB */
/* SL2 is RES0 unless DS=1 & 4KB granule. */
sl2 = extract64(tcr, 33, 1);
if (ds && sl2) {
if (sl0 != 0) {
goto fail;
}
startlevel = -1;
} else {
startlevel = 2 - sl0;
switch (sl0) {
case 2:
if (arm_pamax(cpu) < 44) {
goto fail;
}
break;
case 3:
if (!cpu_isar_feature(aa64_st, cpu)) {
goto fail;
}
startlevel = 3;
break;
}
}
break;
case 11: /* 16KB Pages. */
if (level == 0 || (level == 1 && outputsize <= 40)) {
return false;
case 11: /* 16KB */
switch (sl0) {
case 2:
if (arm_pamax(cpu) < 42) {
goto fail;
}
break;
case 3:
if (!ds) {
goto fail;
}
break;
}
startlevel = 3 - sl0;
break;
case 9: /* 4KB Pages. */
if (level == 0 && outputsize <= 42) {
return false;
case 13: /* 64KB */
switch (sl0) {
case 2:
if (arm_pamax(cpu) < 44) {
goto fail;
}
break;
case 3:
goto fail;
}
startlevel = 3 - sl0;
break;
default:
g_assert_not_reached();
}

/* Inputsize checks. */
if (inputsize > outputsize &&
(arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
/* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
return false;
}
} else {
/* AArch32 only supports 4KB pages. Assert on that. */
/*
* Things are simpler for AArch32 EL2, with only 4k pages.
* There is no separate S2InvalidSL function, but AArch32.S2Walk
* begins with walkparms.sl0 in {'1x'}.
*/
assert(stride == 9);

if (level == 0) {
return false;
if (sl0 >= 2) {
goto fail;
}
startlevel = 2 - sl0;
}
return true;

/* AArch{64,32}.S2InconsistentSL are functionally equivalent. */
levels = 3 - startlevel;
granulebits = stride + 3;

s1_min_iasize = levels * stride + granulebits + 1;
s1_max_iasize = s1_min_iasize + (stride - 1) + 4;

if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
return startlevel;
}

fail:
return INT_MIN;
}
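
To make the final range check concrete, one worked instance with illustrative numbers:

/*
 * Worked example: 4KB granule (stride = 9, granulebits = 12) with SL0 = 1
 * gives startlevel = 1, i.e. levels = 3 - 1 = 2 lookup levels below the
 * start.  Then s1_min_iasize = 2 * 9 + 12 + 1 = 31 and
 * s1_max_iasize = 31 + (9 - 1) + 4 = 43, so SL0 = 1 is accepted for input
 * address sizes of 31..43 bits.
 */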

/**
@@ -1300,38 +1349,10 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
*/
level = 4 - (inputsize - 4) / stride;
} else {
/*
* For stage 2 translations the starting level is specified by the
* VTCR_EL2.SL0 field (whose interpretation depends on the page size)
*/
uint32_t sl0 = extract32(tcr, 6, 2);
uint32_t sl2 = extract64(tcr, 33, 1);
int32_t startlevel;
bool ok;

/* SL2 is RES0 unless DS=1 & 4kb granule. */
if (param.ds && stride == 9 && sl2) {
if (sl0 != 0) {
level = 0;
goto do_translation_fault;
}
startlevel = -1;
} else if (!aarch64 || stride == 9) {
/* AArch32 or 4KB pages */
startlevel = 2 - sl0;

if (cpu_isar_feature(aa64_st, cpu)) {
startlevel &= 3;
}
} else {
/* 16KB or 64KB pages */
startlevel = 3 - sl0;
}

/* Check that the starting level is valid. */
ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
inputsize, stride, outputsize);
if (!ok) {
int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds,
inputsize, stride);
if (startlevel == INT_MIN) {
level = 0;
goto do_translation_fault;
}
level = startlevel;
90 changes: 44 additions & 46 deletions target/arm/tcg/m_helper.c
@@ -56,7 +56,7 @@ static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
return xpsr_read(env) & mask;
}

static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure)
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
uint32_t value = env->v7m.control[secure];

@@ -93,7 +93,7 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
case 0 ... 7: /* xPSR sub-fields */
return v7m_mrs_xpsr(env, reg, 0);
case 20: /* CONTROL */
return v7m_mrs_control(env, 0);
return arm_v7m_mrs_control(env, 0);
default:
/* Unprivileged reads others as zero. */
return 0;
@@ -650,42 +650,6 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
arm_rebuild_hflags(env);
}

static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
bool spsel)
{
/*
* Return a pointer to the location where we currently store the
* stack pointer for the requested security state and thread mode.
* This pointer will become invalid if the CPU state is updated
* such that the stack pointers are switched around (eg changing
* the SPSEL control bit).
* Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
* Unlike that pseudocode, we require the caller to pass us in the
* SPSEL control bit value; this is because we also use this
* function in handling of pushing of the callee-saves registers
* part of the v8M stack frame (pseudocode PushCalleeStack()),
* and in the tailchain codepath the SPSEL bit comes from the exception
* return magic LR value from the previous exception. The pseudocode
* opencodes the stack-selection in PushCalleeStack(), but we prefer
* to make this utility function generic enough to do the job.
*/
bool want_psp = threadmode && spsel;

if (secure == env->v7m.secure) {
if (want_psp == v7m_using_psp(env)) {
return &env->regs[13];
} else {
return &env->v7m.other_sp;
}
} else {
if (want_psp) {
return &env->v7m.other_ss_psp;
} else {
return &env->v7m.other_ss_msp;
}
}
}

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
uint32_t *pvec)
{
@@ -810,8 +774,8 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
!mode;

mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
lr & R_V7M_EXCRET_SPSEL_MASK);
frame_sp_p = arm_v7m_get_sp_ptr(env, M_REG_S, mode,
lr & R_V7M_EXCRET_SPSEL_MASK);
want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
if (want_psp) {
limit = env->v7m.psplim[M_REG_S];
@@ -1656,10 +1620,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
* use 'frame_sp_p' after we do something that makes it invalid.
*/
bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
return_to_secure,
!return_to_handler,
spsel);
uint32_t *frame_sp_p = arm_v7m_get_sp_ptr(env, return_to_secure,
!return_to_handler, spsel);
uint32_t frameptr = *frame_sp_p;
bool pop_ok = true;
ARMMMUIdx mmu_idx;
@@ -1965,7 +1927,7 @@ static bool do_v7m_function_return(ARMCPU *cpu)
threadmode = !arm_v7m_is_handler_mode(env);
spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
frame_sp_p = arm_v7m_get_sp_ptr(env, true, threadmode, spsel);
frameptr = *frame_sp_p;

/*
@@ -2465,7 +2427,7 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
case 0 ... 7: /* xPSR sub-fields */
return v7m_mrs_xpsr(env, reg, el);
case 20: /* CONTROL */
return v7m_mrs_control(env, env->v7m.secure);
return arm_v7m_mrs_control(env, env->v7m.secure);
case 0x94: /* CONTROL_NS */
/*
* We have to handle this here because unprivileged Secure code
@@ -2900,3 +2862,39 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
}

#endif /* !CONFIG_USER_ONLY */

uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
bool spsel)
{
/*
* Return a pointer to the location where we currently store the
* stack pointer for the requested security state and thread mode.
* This pointer will become invalid if the CPU state is updated
* such that the stack pointers are switched around (eg changing
* the SPSEL control bit).
* Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
* Unlike that pseudocode, we require the caller to pass us in the
* SPSEL control bit value; this is because we also use this
* function in handling of pushing of the callee-saves registers
* part of the v8M stack frame (pseudocode PushCalleeStack()),
* and in the tailchain codepath the SPSEL bit comes from the exception
* return magic LR value from the previous exception. The pseudocode
* opencodes the stack-selection in PushCalleeStack(), but we prefer
* to make this utility function generic enough to do the job.
*/
bool want_psp = threadmode && spsel;

if (secure == env->v7m.secure) {
if (want_psp == v7m_using_psp(env)) {
return &env->regs[13];
} else {
return &env->v7m.other_sp;
}
} else {
if (want_psp) {
return &env->v7m.other_ss_psp;
} else {
return &env->v7m.other_ss_msp;
}
}
}
26 changes: 22 additions & 4 deletions target/arm/tcg/pauth_helper.c
@@ -339,14 +339,32 @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
return pac | ext | ptr;
}

static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param)
static uint64_t pauth_ptr_mask_internal(ARMVAParameters param)
{
/* Note that bit 55 is used whether or not the regime has 2 ranges. */
uint64_t extfield = sextract64(ptr, 55, 1);
int bot_pac_bit = 64 - param.tsz;
int top_pac_bit = 64 - 8 * param.tbi;

return deposit64(ptr, bot_pac_bit, top_pac_bit - bot_pac_bit, extfield);
return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}

static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param)
{
uint64_t mask = pauth_ptr_mask_internal(param);

/* Note that bit 55 is used whether or not the regime has 2 ranges. */
if (extract64(ptr, 55, 1)) {
return ptr | mask;
} else {
return ptr & ~mask;
}
}

uint64_t pauth_ptr_mask(CPUARMState *env, uint64_t ptr, bool data)
{
ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data);

return pauth_ptr_mask_internal(param);
}
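
A standalone sketch of the mask arithmetic for one hypothetical regime (tsz = 16, TBI disabled); the local MAKE_64BIT_MASK mirrors QEMU's macro:

#include <assert.h>
#include <stdint.h>

#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

int main(void)
{
    int bot_pac_bit = 64 - 16;     /* param.tsz = 16 */
    int top_pac_bit = 64 - 8 * 0;  /* param.tbi = 0 */
    uint64_t mask = MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);

    /* the PAC field occupies bits [63:48] */
    assert(mask == 0xffff000000000000ull);

    /* pauth_original_ptr(): copy bit 55 across the whole PAC field */
    uint64_t ptr = 0x1234000012345678ull;
    uint64_t orig = (ptr & (1ull << 55)) ? (ptr | mask) : (ptr & ~mask);
    assert(orig == 0x0000000012345678ull);
    return 0;
}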

static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,