RISC-V: KVM: Implement guest external interrupt line management
The RISC-V host will have one guest external interrupt line for each
VS-level IMSIC associated with a HART. The guest external interrupt
lines are a per-HART resource, and the hypervisor can use the HGEIE,
HGEIP, and HIE CSRs to manage them.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
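
Editor's note: as a rough, hypothetical sketch (not part of this patch), a
caller such as the VS-level IMSIC virtualization code might drive the new
interface along these lines; the function name, error handling, and call
ordering here are assumed for illustration:

/* Hypothetical caller, for illustration only -- not from this commit. */
static int example_attach_vs_file(struct kvm_vcpu *vcpu, int host_cpu)
{
	int hgei;

	/* Claim a free guest external interrupt line on host_cpu. */
	hgei = kvm_riscv_aia_alloc_hgei(host_cpu, vcpu);
	if (hgei < 0)
		return hgei;	/* e.g. -ENOENT when no line is free */

	/* ... bind the vCPU's VS-level IMSIC file to line 'hgei' ... */

	/* While the vCPU blocks, ask to be woken when its line fires. */
	kvm_riscv_aia_wakeon_hgei(vcpu);

	/* On teardown, return the line to the per-CPU pool. */
	kvm_riscv_aia_free_hgei(host_cpu, hgei);
	return 0;
}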
avpatel committed Aug 30, 2021
1 parent 7430b3e commit fbcf335
Showing 2 changed files with 222 additions and 12 deletions.
19 changes: 7 additions & 12 deletions arch/riscv/include/asm/kvm_aia.h
@@ -33,6 +33,10 @@ DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
#define kvm_riscv_aia_available() \
static_branch_unlikely(&kvm_riscv_aia_available)

static inline void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
}
@@ -121,18 +125,9 @@ static inline void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
{
}

static inline int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner)
{
return 0;
}

static inline void kvm_riscv_aia_free_hgei(int cpu, int hgei)
{
}

static inline void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner)
{
}
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner);
void kvm_riscv_aia_free_hgei(int cpu, int hgei);
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner);

void kvm_riscv_aia_enable(void);
void kvm_riscv_aia_disable(void);
215 changes: 215 additions & 0 deletions arch/riscv/kvm/aia.c
@@ -15,10 +15,39 @@
#include <linux/spinlock.h>
#include <asm/hwcap.h>

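/*
 * Per-CPU bookkeeping for the HART's guest external interrupt
 * lines: free_bitmap has bit N set when line N is free, and
 * owners[N] records the vCPU bound to line N.
 */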
struct aia_hgei_control {
raw_spinlock_t lock;
unsigned long free_bitmap;
struct kvm_vcpu *owners[BITS_PER_LONG];
};
static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
static int hgei_parent_irq;

unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);

static int aia_find_hgei(struct kvm_vcpu *owner)
{
int i, hgei;
unsigned long flags;
struct aia_hgei_control *hgctrl = this_cpu_ptr(&aia_hgei);

raw_spin_lock_irqsave(&hgctrl->lock, flags);

hgei = -1;
for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
if (hgctrl->owners[i] == owner) {
hgei = i;
break;
}
}

raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

return hgei;
}

static inline void aia_set_hvictl(bool ext_irq_pending)
{
unsigned long hvictl;
@@ -33,6 +62,150 @@ static inline void aia_set_hvictl(bool ext_irq_pending)
csr_write(CSR_HVICTL, hvictl);
}

int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner)
{
int ret = -ENOENT;
unsigned long flags;
struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

if (!kvm_riscv_aia_available())
return -ENOSYS;
if (!hgctrl)
return -ENODEV;

raw_spin_lock_irqsave(&hgctrl->lock, flags);

if (hgctrl->free_bitmap) {
ret = __ffs(hgctrl->free_bitmap);
hgctrl->free_bitmap &= ~BIT(ret);
hgctrl->owners[ret] = owner;
}

raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

return ret;
}
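
Editor's note: for intuition, a worked example of the allocation arithmetic
above, using assumed values (not from the patch):

/* Illustration only: one allocation step with assumed values. */
unsigned long free_bitmap = 0xfe;	/* lines 1..7 free; bit 0 reserved */
int hgei = __ffs(free_bitmap);		/* lowest set bit -> 1 */
free_bitmap &= ~BIT(hgei);		/* 0xfc: line 1 is now owned */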

void kvm_riscv_aia_free_hgei(int cpu, int hgei)
{
unsigned long flags;
struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

if (!kvm_riscv_aia_available() || !hgctrl)
return;

raw_spin_lock_irqsave(&hgctrl->lock, flags);

if (0 < hgei && hgei <= kvm_riscv_aia_nr_hgei) {
if (!(hgctrl->free_bitmap & BIT(hgei))) {
hgctrl->free_bitmap |= BIT(hgei);
hgctrl->owners[hgei] = NULL;
}
}

raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}

void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner)
{
int hgei;

if (!kvm_riscv_aia_available())
return;

hgei = aia_find_hgei(owner);
if (hgei > 0)
csr_set(CSR_HGEIE, BIT(hgei));
}

static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
int i;
unsigned long hgei_mask, flags;
struct aia_hgei_control *hgctrl = this_cpu_ptr(&aia_hgei);

hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
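/*
 * Mask the pending-and-enabled lines so this handler does not
 * immediately re-fire; kvm_riscv_aia_wakeon_hgei() re-enables a
 * line in HGEIE on behalf of its owner vCPU.
 */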
csr_clear(CSR_HGEIE, hgei_mask);

raw_spin_lock_irqsave(&hgctrl->lock, flags);

for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
if (hgctrl->owners[i])
kvm_vcpu_kick(hgctrl->owners[i]);
}

raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

return IRQ_HANDLED;
}

static int aia_hgei_init(void)
{
int cpu, rc;
struct irq_fwspec sgei;
struct aia_hgei_control *hgctrl;
struct irq_domain *domain = NULL;
struct device_node *cpun, *child;

/* Initialize per-CPU guest external interrupt line management */
for_each_possible_cpu(cpu) {
hgctrl = per_cpu_ptr(&aia_hgei, cpu);
raw_spin_lock_init(&hgctrl->lock);
if (kvm_riscv_aia_nr_hgei) {
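/*
 * Lines are numbered 1..kvm_riscv_aia_nr_hgei; bit 0 of
 * HGEIE/HGEIP is read-only zero, so keep it out of the
 * free bitmap.
 */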
hgctrl->free_bitmap =
BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
hgctrl->free_bitmap &= ~BIT(0);
} else
hgctrl->free_bitmap = 0;
}

/* Find the INTC interrupt domain */
for_each_of_cpu_node(cpun) {
child = of_get_compatible_child(cpun, "riscv,cpu-intc");
if (!child) {
kvm_err("failed to find INTC node [%pOF]\n", cpun);
return -ENODEV;
}

domain = irq_find_host(child);
of_node_put(child);
if (domain)
break;
}
if (!domain) {
kvm_err("can't find INTC IRQ domain\n");
return -ENODEV;
}

/* Get per-CPU SGEI interrupt */
sgei.fwnode = domain->fwnode;
sgei.param_count = 1;
sgei.param[0] = IRQ_S_GEXT;
hgei_parent_irq = __irq_domain_alloc_irqs(domain, -1, 1,
NUMA_NO_NODE, &sgei,
false, NULL);
if (hgei_parent_irq <= 0) {
kvm_err("unable to alloc SGEI IRQ\n");
return -ENOMEM;
}

/* Request per-CPU SGEI interrupt */
rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
"riscv-kvm", &aia_hgei);
if (rc) {
kvm_err("failed to request SGEI IRQ\n");
return rc;
}

return 0;
}

static void aia_hgei_exit(void)
{
/* Free per-CPU SGEI interrupt */
free_percpu_irq(hgei_parent_irq, &aia_hgei);
}

void kvm_riscv_aia_enable(void)
{
if (!kvm_riscv_aia_available())
@@ -47,18 +220,50 @@ void kvm_riscv_aia_enable(void)
csr_write(CSR_HVIPRIO1H, 0x0);
csr_write(CSR_HVIPRIO2H, 0x0);
#endif

/* Enable per-CPU SGEI interrupt */
enable_percpu_irq(hgei_parent_irq,
irq_get_trigger_type(hgei_parent_irq));
csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
}

void kvm_riscv_aia_disable(void)
{
int i;
unsigned long flags;
struct aia_hgei_control *hgctrl = this_cpu_ptr(&aia_hgei);

if (!kvm_riscv_aia_available())
return;

/* Disable per-CPU SGEI interrupt */
csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
disable_percpu_irq(hgei_parent_irq);

aia_set_hvictl(false);

raw_spin_lock_irqsave(&hgctrl->lock, flags);

for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
if (!hgctrl->owners[i])
continue;

/*
 * We release hgctrl->lock before calling
 * kvm_riscv_vcpu_aia_imsic_release() so that we
 * don't have lock ordering issues.
 */
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
kvm_riscv_vcpu_aia_imsic_release(hgctrl->owners[i]);
raw_spin_lock_irqsave(&hgctrl->lock, flags);
}

raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}

int kvm_riscv_aia_init(void)
{
int rc;
unsigned int hgeie_bits;

if (!riscv_aia_available)
@@ -79,6 +284,11 @@ int kvm_riscv_aia_init(void)
if (kvm_riscv_aia_nr_hgei)
kvm_riscv_aia_max_ids = imsic_num_ids() + 1;

/* Initialize guest external interrupt line management */
rc = aia_hgei_init();
if (rc)
return rc;

/* Enable KVM AIA support */
static_branch_enable(&kvm_riscv_aia_available);

@@ -87,4 +297,9 @@

void kvm_riscv_aia_exit(void)
{
if (!kvm_riscv_aia_available())
return;

/* Cleanup the HGEI state */
aia_hgei_exit();
}
