Skip to content

Commit

Permalink
RISC-V: KVM: Add G-stage ioremap() and iounmap() functions
Browse files Browse the repository at this point in the history
The in-kernel AIA IMSIC support can be accelerated by on-demand
mapping / unmapping Guest IMSIC pages to Host VS-level IMSIC pages.
To help achieve this, we add kvm_riscv_stage2_ioremap() and
kvm_riscv_stage2_iounmap() functions.

Also, these new functions for updating G-stage page table mappings
will be called in atomic context, so kvm_mmu_topup_memory_cache()
is extended to allow adding pages with GFP_ATOMIC.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
  • Loading branch information
avpatel committed Dec 29, 2021
1 parent 43fbc40 commit 6ad98a7
Show file tree
Hide file tree
Showing 4 changed files with 23 additions and 5 deletions.
5 changes: 5 additions & 0 deletions arch/riscv/include/asm/kvm_host.h
Expand Up @@ -221,6 +221,11 @@ void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa_divby_4);
void __kvm_riscv_hfence_gvma_all(void);

int kvm_riscv_stage2_ioremap(struct kvm *kvm, gpa_t gpa,
phys_addr_t hpa, unsigned long size,
bool writable, bool in_atomic);
void kvm_riscv_stage2_iounmap(struct kvm *kvm, gpa_t gpa,
unsigned long size);
int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot,
gpa_t gpa, unsigned long hva, bool is_write);
Expand Down
18 changes: 14 additions & 4 deletions arch/riscv/kvm/mmu.c
Expand Up @@ -342,8 +342,9 @@ static void stage2_wp_memory_region(struct kvm *kvm, int slot)
kvm_flush_remote_tlbs(kvm);
}

static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
unsigned long size, bool writable)
int kvm_riscv_stage2_ioremap(struct kvm *kvm, gpa_t gpa,
phys_addr_t hpa, unsigned long size,
bool writable, bool in_atomic)
{
pte_t pte;
int ret = 0;
Expand All @@ -352,6 +353,7 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
struct kvm_mmu_memory_cache pcache;

memset(&pcache, 0, sizeof(pcache));
pcache.gfp_atomic = GFP_ATOMIC | __GFP_ACCOUNT;
pcache.gfp_zero = __GFP_ZERO;

end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
Expand Down Expand Up @@ -381,6 +383,13 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
return ret;
}

/*
 * kvm_riscv_stage2_iounmap() - Remove a G-stage I/O mapping previously
 * created with kvm_riscv_stage2_ioremap().
 *
 * @kvm:  the VM whose G-stage page table is updated
 * @gpa:  guest physical address where the mapping starts
 * @size: length of the range to unmap, in bytes
 *
 * Takes kvm->mmu_lock to serialize against concurrent G-stage page
 * table updates; safe to call from atomic context since the spinlock
 * is the only blocking primitive used and no memory is allocated here.
 */
void kvm_riscv_stage2_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
{
spin_lock(&kvm->mmu_lock);
/*
 * NOTE(review): the final 'false' argument presumably means "do not
 * release the underlying host pages" (appropriate for I/O mappings
 * whose backing is not page allocator memory) -- confirm against the
 * stage2_unmap_range() definition, which is not visible in this hunk.
 */
stage2_unmap_range(kvm, gpa, size, false);
spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
Expand Down Expand Up @@ -512,8 +521,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
goto out;
}

ret = stage2_ioremap(kvm, gpa, pa,
vm_end - vm_start, writable);
ret = kvm_riscv_stage2_ioremap(kvm, gpa, pa,
vm_end - vm_start,
writable, false);
if (ret)
break;
}
Expand Down
1 change: 1 addition & 0 deletions include/linux/kvm_types.h
Expand Up @@ -64,6 +64,7 @@ struct gfn_to_hva_cache {
/*
 * Per-arch cache of pre-allocated objects used to top up MMU page
 * table allocations ahead of time, so page table updates can proceed
 * without allocating while holding locks.
 */
struct kvm_mmu_memory_cache {
/* Number of pre-allocated objects currently stored in @objects. */
int nobjs;
/* Extra GFP flags (e.g. __GFP_ZERO) OR'd into allocations when set. */
gfp_t gfp_zero;
/*
 * When nonzero, the full GFP mask used by
 * kvm_mmu_topup_memory_cache() instead of GFP_KERNEL_ACCOUNT,
 * allowing top-ups from atomic context (e.g. GFP_ATOMIC |
 * __GFP_ACCOUNT as set by kvm_riscv_stage2_ioremap()).
 */
gfp_t gfp_atomic;
/*
 * NOTE(review): presumably the slab cache backing the objects, with
 * NULL meaning whole-page allocations -- the allocation helper is
 * not visible in this hunk; confirm in mmu_memory_cache_alloc_obj().
 */
struct kmem_cache *kmem_cache;
void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
};
Expand Down
4 changes: 3 additions & 1 deletion virt/kvm/kvm_main.c
Expand Up @@ -378,7 +378,9 @@ int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
if (mc->nobjs >= min)
return 0;
while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
obj = mmu_memory_cache_alloc_obj(mc, (mc->gfp_atomic) ?
mc->gfp_atomic :
GFP_KERNEL_ACCOUNT);
if (!obj)
return mc->nobjs >= min ? 0 : -ENOMEM;
mc->objects[mc->nobjs++] = obj;
Expand Down

0 comments on commit 6ad98a7

Please sign in to comment.