RISC-V: KVM: Use SBI sync SRET call when available
We implement an optimized KVM world-switch using the SBI sync SRET call
when the SBI nested acceleration extension is available. This improves
KVM world-switch performance when KVM RISC-V is running as a Guest under
some other hypervisor.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
avpatel committed Mar 23, 2023
1 parent 1c2699a commit 5e80bb1
Showing 3 changed files with 146 additions and 16 deletions.
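
At a high level, the new path is selected at run time. A minimal sketch of the control flow (function names as used in the diffs below; this is an illustration, not the literal kernel code):

    if (kvm_riscv_nacl_sync_sret_available()) {
            /* Stage Guest state in NACL shared memory, then make a single
             * SBI call that synchronizes CSRs and SRETs into the Guest. */
            __kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL,
                                       SBI_EXT_NACL_SYNC_SRET);
    } else {
            /* Fallback: the existing pure-assembly world-switch. */
            __kvm_riscv_switch_to(&vcpu->arch);
    }
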
32 changes: 32 additions & 0 deletions arch/riscv/include/asm/kvm_nacl.h
@@ -12,6 +12,8 @@
#include <asm/csr.h>
#include <asm/sbi.h>

struct kvm_vcpu_arch;

DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_available);
#define kvm_riscv_nacl_available() \
static_branch_unlikely(&kvm_riscv_nacl_available)
@@ -43,6 +45,10 @@ void __kvm_riscv_nacl_hfence(void *shmem,
unsigned long page_num,
unsigned long page_count);

void __kvm_riscv_nacl_switch_to(struct kvm_vcpu_arch *vcpu_arch,
unsigned long sbi_ext_id,
unsigned long sbi_func_id);

int kvm_riscv_nacl_enable(void);

void kvm_riscv_nacl_disable(void);
@@ -64,6 +70,32 @@ int kvm_riscv_nacl_init(void);
#define nacl_shmem_fast() \
(kvm_riscv_nacl_available() ? nacl_shmem() : NULL)

#define nacl_shmem_scratch_read_long(__s, __o) \
({ \
unsigned long *__p = (__s) + \
SBI_NACL_SHMEM_SCRATCH_OFFSET + \
(__o); \
lelong_to_cpu(*__p); \
})

#define nacl_shmem_scratch_write_long(__s, __o, __v) \
do { \
unsigned long *__p = (__s) + \
SBI_NACL_SHMEM_SCRATCH_OFFSET + \
(__o); \
*__p = cpu_to_lelong(__v); \
} while (0)

#define nacl_shmem_scratch_write_longs(__s, __o, __a, __c) \
do { \
unsigned int __i; \
unsigned long *__p = (__s) + \
SBI_NACL_SHMEM_SCRATCH_OFFSET + \
(__o); \
for (__i = 0; __i < (__c); __i++) \
__p[__i] = cpu_to_lelong((__a)[__i]); \
} while (0)

#define nacl_shmem_sync_hfence(__e) \
do { \
sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SYNC_HFENCE, \
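
The scratch accessors added above are plain byte-offset loads and stores into the NACL shared-memory area, with values kept little-endian as the shared-memory layout expects. A self-contained user-space sketch of the same pattern (the offset value and the identity lelong_to_cpu()/cpu_to_lelong() stubs are illustrative stand-ins, assuming a little-endian host):

    #include <stdio.h>

    /* Stand-ins for the kernel definitions; values are illustrative. */
    #define SBI_NACL_SHMEM_SCRATCH_OFFSET   0UL
    #define lelong_to_cpu(v)                (v)  /* little-endian host assumed */
    #define cpu_to_lelong(v)                (v)

    /* Same shape as the kernel macros above, with an explicit char * cast
     * (the kernel relies on GCC's void * arithmetic instead). */
    #define nacl_shmem_scratch_write_long(__s, __o, __v)                \
    do {                                                                \
            unsigned long *__p = (void *)((char *)(__s) +               \
                            SBI_NACL_SHMEM_SCRATCH_OFFSET + (__o));     \
            *__p = cpu_to_lelong(__v);                                  \
    } while (0)

    #define nacl_shmem_scratch_read_long(__s, __o)                      \
    ({                                                                  \
            unsigned long *__p = (void *)((char *)(__s) +               \
                            SBI_NACL_SHMEM_SCRATCH_OFFSET + (__o));     \
            lelong_to_cpu(*__p);                                        \
    })

    int main(void)
    {
            unsigned long shmem[8] = { 0 }; /* stands in for nacl_shmem() */

            nacl_shmem_scratch_write_long(shmem, sizeof(unsigned long), 0x1234UL);
            printf("%#lx\n", nacl_shmem_scratch_read_long(shmem, sizeof(unsigned long)));
            return 0;
    }
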
48 changes: 44 additions & 4 deletions arch/riscv/kvm/vcpu.c
@@ -1112,18 +1112,58 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
*/
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
void *nshmem;
struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context;
struct kvm_cpu_context *hcntx = &vcpu->arch.host_context;

guest_state_enter_irqoff();

if (kvm_riscv_nacl_sync_sret_available()) {
nshmem = nacl_shmem();

if (kvm_riscv_nacl_autoswap_csr_available()) {
hcntx->hstatus =
nacl_shmem_csr_read(nshmem, CSR_HSTATUS);
nacl_shmem_scratch_write_long(nshmem,
SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
SBI_NACL_SHMEM_AUTOSWAP_HSTATUS,
gcntx->hstatus);
nacl_shmem_scratch_write_long(nshmem,
SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS);
} else if (kvm_riscv_nacl_sync_csr_available()) {
hcntx->hstatus = nacl_shmem_csr_swap(nshmem,
CSR_HSTATUS, gcntx->hstatus);
} else {
hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
}
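/*
 * In all three cases the Host HSTATUS value ends up in hcntx->hstatus;
 * the Guest value is either staged in shared memory for the SBI
 * implementation to apply, or already written to the CSR directly.
 */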

nacl_shmem_scratch_write_longs(nshmem,
SBI_NACL_SHMEM_SRET_OFFSET +
SBI_NACL_SHMEM_SRET_X(1),
&gcntx->ra,
SBI_NACL_SHMEM_SRET_X_LAST);

__kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL,
SBI_EXT_NACL_SYNC_SRET);

if (kvm_riscv_nacl_autoswap_csr_available()) {
nacl_shmem_scratch_write_long(nshmem,
SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
0);
gcntx->hstatus = nacl_shmem_scratch_read_long(nshmem,
SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
SBI_NACL_SHMEM_AUTOSWAP_HSTATUS);
} else {
gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
}
} else {
hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);

__kvm_riscv_switch_to(&vcpu->arch);

gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
}

vcpu->arch.last_exit_cpu = vcpu->cpu;
guest_state_exit_irqoff();
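
The sync SRET call is issued from a dedicated assembly entry point rather than through the usual sbi_ecall() helper because, on success, the call does not return to the caller: the SBI implementation performs the SRET into the Guest on KVM's behalf, and control only comes back via the Guest trap path. For reference, a minimal sketch of the SBI calling convention the assembly below follows (extension ID in A7, function ID in A6; this helper is an illustration, not a kernel API):

    static inline long sbi_call0(unsigned long eid, unsigned long fid)
    {
            register unsigned long a6 asm("a6") = fid;  /* SBI function ID */
            register unsigned long a7 asm("a7") = eid;  /* SBI extension ID */
            register long a0 asm("a0") = 0;             /* returns error code */
            register long a1 asm("a1") = 0;             /* returns value */

            asm volatile("ecall"
                         : "+r" (a0), "+r" (a1)
                         : "r" (a6), "r" (a7)
                         : "memory");
            return a0;
    }
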
82 changes: 70 additions & 12 deletions arch/riscv/kvm/vcpu_switch.S
@@ -11,11 +11,7 @@
#include <asm/asm-offsets.h>
#include <asm/csr.h>

.macro SAVE_HOST_GPRS
/* Save Host GPRs (except A0 and T0-T6) */
REG_S ra, (KVM_ARCH_HOST_RA)(a0)
REG_S sp, (KVM_ARCH_HOST_SP)(a0)
@@ -40,11 +36,13 @@ ENTRY(__kvm_riscv_switch_to)
REG_S s9, (KVM_ARCH_HOST_S9)(a0)
REG_S s10, (KVM_ARCH_HOST_S10)(a0)
REG_S s11, (KVM_ARCH_HOST_S11)(a0)
.endm

.macro SAVE_HOST_AND_RESTORE_GUEST_CSRS __resume_addr
/* Load Guest CSR values */
REG_L t0, (KVM_ARCH_GUEST_SSTATUS)(a0)
REG_L t1, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
la t3, \__resume_addr
REG_L t4, (KVM_ARCH_GUEST_SEPC)(a0)

/* Save Host and Restore Guest SSTATUS */
@@ -67,7 +65,9 @@
REG_S t1, (KVM_ARCH_HOST_SCOUNTEREN)(a0)
REG_S t2, (KVM_ARCH_HOST_SSCRATCH)(a0)
REG_S t3, (KVM_ARCH_HOST_STVEC)(a0)
.endm

.macro RESTORE_GUEST_GPRS
/* Restore Guest GPRs (except A0) */
REG_L ra, (KVM_ARCH_GUEST_RA)(a0)
REG_L sp, (KVM_ARCH_GUEST_SP)(a0)
@@ -102,13 +102,9 @@

/* Restore Guest A0 */
REG_L a0, (KVM_ARCH_GUEST_A0)(a0)
.endm

.macro SAVE_GUEST_GPRS
/* Swap Guest A0 with SSCRATCH */
csrrw a0, CSR_SSCRATCH, a0

@@ -143,7 +139,9 @@
REG_S t4, (KVM_ARCH_GUEST_T4)(a0)
REG_S t5, (KVM_ARCH_GUEST_T5)(a0)
REG_S t6, (KVM_ARCH_GUEST_T6)(a0)
.endm

.macro SAVE_GUEST_AND_RESTORE_HOST_CSRS
/* Load Host CSR values */
REG_L t1, (KVM_ARCH_HOST_STVEC)(a0)
REG_L t2, (KVM_ARCH_HOST_SSCRATCH)(a0)
@@ -170,7 +168,9 @@
REG_S t2, (KVM_ARCH_GUEST_A0)(a0)
REG_S t3, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
REG_S t4, (KVM_ARCH_GUEST_SSTATUS)(a0)
.endm

.macro RESTORE_HOST_GPRS
/* Restore Host GPRs (except A0 and T0-T6) */
REG_L ra, (KVM_ARCH_HOST_RA)(a0)
REG_L sp, (KVM_ARCH_HOST_SP)(a0)
@@ -195,6 +195,64 @@
REG_L s9, (KVM_ARCH_HOST_S9)(a0)
REG_L s10, (KVM_ARCH_HOST_S10)(a0)
REG_L s11, (KVM_ARCH_HOST_S11)(a0)
.endm

.text
.altmacro
.option norelax

/*
* Parameters:
* A0 <= Pointer to struct kvm_vcpu_arch
*/
ENTRY(__kvm_riscv_switch_to)
SAVE_HOST_GPRS

SAVE_HOST_AND_RESTORE_GUEST_CSRS __kvm_riscv_switch_to_return

RESTORE_GUEST_GPRS

/* Resume Guest using SRET */
sret

/* Back to Host */
.align 2
__kvm_riscv_switch_to_return:
SAVE_GUEST_GPRS

SAVE_GUEST_AND_RESTORE_HOST_CSRS

RESTORE_HOST_GPRS

/* Return to C code */
ret
ENDPROC(__kvm_riscv_switch_to)

/*
* Parameters:
* A0 <= Pointer to struct kvm_vcpu_arch
* A1 <= SBI extension ID
* A2 <= SBI function ID
* A3 <= Flags for SBI sync SRET call
*/
ENTRY(__kvm_riscv_nacl_switch_to)
SAVE_HOST_GPRS

SAVE_HOST_AND_RESTORE_GUEST_CSRS __kvm_riscv_nacl_switch_to_return

/* Resume Guest using SBI nested acceleration */
add a6, a2, zero
add a7, a1, zero
ecall

/* Back to Host */
.align 2
__kvm_riscv_nacl_switch_to_return:
SAVE_GUEST_GPRS

SAVE_GUEST_AND_RESTORE_HOST_CSRS

RESTORE_HOST_GPRS

/* Return to C code */
ret
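
Both entry points share the same return mechanism: SAVE_HOST_AND_RESTORE_GUEST_CSRS installs its __resume_addr argument as the trap vector while the Guest runs (via the STVEC swap in the collapsed part of the macro), so the first trap out of the Guest lands on the matching *_switch_to_return label. One round trip, condensed into pseudo-C over the macros above (helper names here are descriptive placeholders, not real functions):

    save_host_gprs(vcpu_arch);                       /* SAVE_HOST_GPRS */
    host_stvec = csr_swap(CSR_STVEC, resume_label);  /* SAVE_HOST_AND_RESTORE_GUEST_CSRS */
    enter_guest();                                   /* sret, or ecall into SBI sync SRET */
    /* ... Guest runs until it traps; hardware vectors to resume_label ... */
    save_guest_gprs(vcpu_arch);                      /* SAVE_GUEST_GPRS */
    csr_write(CSR_STVEC, host_stvec);                /* SAVE_GUEST_AND_RESTORE_HOST_CSRS */
    restore_host_gprs(vcpu_arch);                    /* RESTORE_HOST_GPRS */
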
