RISC-V: KVM: Add G-stage ioremap() and iounmap() functions
The in-kernel AIA IMSIC support can be accelerated by on-demand
mapping / unmapping of Guest IMSIC pages to Host VS-level IMSIC pages.
To help achieve this, we add kvm_riscv_gstage_ioremap() and
kvm_riscv_gstage_iounmap() functions.
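
As a rough sketch of the intended call pattern (not part of this
commit; every name other than the two new functions is a placeholder),
an in-kernel user such as the AIA IMSIC code could map a guest IMSIC
page onto a host page and later tear it down like this:

	/* imsic_gpa / imsic_hpa are hypothetical addresses. */
	ret = kvm_riscv_gstage_ioremap(kvm, imsic_gpa, imsic_hpa,
				       PAGE_SIZE, true /* writable */,
				       true /* in_atomic: caller holds a lock */);
	if (ret)
		return ret;
	/* ... guest runs with the page mapped ... */
	kvm_riscv_gstage_iounmap(kvm, imsic_gpa, PAGE_SIZE);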

Also, these new functions for updating G-stage page table mappings
will be called in atomic context, so kvm_mmu_topup_memory_cache()
is extended to allow allocating pages with GFP_ATOMIC.
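
For reference, a non-sleeping topup under this scheme would look
roughly as follows (a sketch assembled from the hunks below;
gstage_pgd_levels stands in for whatever page-table depth the caller
actually needs):

	struct kvm_mmu_memory_cache pcache;

	memset(&pcache, 0, sizeof(pcache));
	/* A non-zero gfp_atomic overrides GFP_KERNEL_ACCOUNT inside
	 * kvm_mmu_topup_memory_cache(), so no allocation may sleep. */
	pcache.gfp_atomic = GFP_ATOMIC | __GFP_ACCOUNT;
	pcache.gfp_zero = __GFP_ZERO;

	ret = kvm_mmu_topup_memory_cache(&pcache, gstage_pgd_levels);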

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
avpatel committed May 10, 2022
1 parent ade97f9 commit 2e1468f
Showing 4 changed files with 23 additions and 5 deletions.
5 changes: 5 additions & 0 deletions arch/riscv/include/asm/kvm_host.h
@@ -295,6 +295,11 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
 void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
 			       unsigned long hbase, unsigned long hmask);
 
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+			     phys_addr_t hpa, unsigned long size,
+			     bool writable, bool in_atomic);
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
+			      unsigned long size);
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
 			 gpa_t gpa, unsigned long hva, bool is_write);
18 changes: 14 additions & 4 deletions arch/riscv/kvm/mmu.c
@@ -343,8 +343,9 @@ static void gstage_wp_memory_region(struct kvm *kvm, int slot)
 	kvm_flush_remote_tlbs(kvm);
 }
 
-static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
-			  unsigned long size, bool writable)
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+			     phys_addr_t hpa, unsigned long size,
+			     bool writable, bool in_atomic)
 {
 	pte_t pte;
 	int ret = 0;
@@ -353,6 +354,7 @@ static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 	struct kvm_mmu_memory_cache pcache;
 
 	memset(&pcache, 0, sizeof(pcache));
+	pcache.gfp_atomic = in_atomic ? GFP_ATOMIC | __GFP_ACCOUNT : 0;
 	pcache.gfp_zero = __GFP_ZERO;
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
@@ -382,6 +384,13 @@ static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 	return ret;
 }
 
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
+{
+	spin_lock(&kvm->mmu_lock);
+	gstage_unmap_range(kvm, gpa, size, false);
+	spin_unlock(&kvm->mmu_lock);
+}
+
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					     struct kvm_memory_slot *slot,
 					     gfn_t gfn_offset,
@@ -517,8 +526,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			goto out;
 		}
 
-		ret = gstage_ioremap(kvm, gpa, pa,
-				     vm_end - vm_start, writable);
+		ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
+					       vm_end - vm_start,
+					       writable, false);
 		if (ret)
 			break;
 	}
1 change: 1 addition & 0 deletions include/linux/kvm_types.h
@@ -87,6 +87,7 @@ struct gfn_to_pfn_cache {
 struct kvm_mmu_memory_cache {
 	int nobjs;
 	gfp_t gfp_zero;
+	gfp_t gfp_atomic;
 	struct kmem_cache *kmem_cache;
 	void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
 };
4 changes: 3 additions & 1 deletion virt/kvm/kvm_main.c
@@ -386,7 +386,9 @@ int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
 	if (mc->nobjs >= min)
 		return 0;
 	while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
-		obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
+		obj = mmu_memory_cache_alloc_obj(mc, (mc->gfp_atomic) ?
+						 mc->gfp_atomic :
+						 GFP_KERNEL_ACCOUNT);
 		if (!obj)
 			return mc->nobjs >= min ? 0 : -ENOMEM;
 		mc->objects[mc->nobjs++] = obj;
