Skip to content

Commit 1df8dca

Browse files
lixianglai authored and gregkh committed
LoongArch: KVM: Compile switch.S directly into the kernel
commit 5203012 upstream. If we directly compile the switch.S file into the kernel, the address of the kvm_exc_entry function will definitely be within the DMW memory area. Therefore, we will no longer need to perform a copy relocation of kvm_exc_entry. So this patch compiles switch.S directly into the kernel, and then removes the copy-relocation logic for the kvm_exc_entry function. Cc: stable@vger.kernel.org Signed-off-by: Xianglai Li <lixianglai@loongson.cn> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 2fd54dd commit 1df8dca

6 files changed

Lines changed: 41 additions & 42 deletions

File tree

arch/loongarch/Kbuild

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ obj-y += mm/
33
obj-y += net/
44
obj-y += vdso/
55

6-
obj-$(CONFIG_KVM) += kvm/
6+
obj-$(subst m,y,$(CONFIG_KVM)) += kvm/
77

88
# for cleaning
99
subdir- += boot

arch/loongarch/include/asm/asm-prototypes.h

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,3 +20,23 @@ asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_
2020
struct pt_regs *regs,
2121
int (*fn)(void *),
2222
void *fn_arg);
23+
24+
struct kvm_run;
25+
struct kvm_vcpu;
26+
struct loongarch_fpu;
27+
28+
void kvm_exc_entry(void);
29+
int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
30+
31+
void kvm_save_fpu(struct loongarch_fpu *fpu);
32+
void kvm_restore_fpu(struct loongarch_fpu *fpu);
33+
34+
#ifdef CONFIG_CPU_HAS_LSX
35+
void kvm_save_lsx(struct loongarch_fpu *fpu);
36+
void kvm_restore_lsx(struct loongarch_fpu *fpu);
37+
#endif
38+
39+
#ifdef CONFIG_CPU_HAS_LASX
40+
void kvm_save_lasx(struct loongarch_fpu *fpu);
41+
void kvm_restore_lasx(struct loongarch_fpu *fpu);
42+
#endif

arch/loongarch/include/asm/kvm_host.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,6 @@ struct kvm_context {
8686
struct kvm_world_switch {
8787
int (*exc_entry)(void);
8888
int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
89-
unsigned long page_order;
9089
};
9190

9291
#define MAX_PGTABLE_LEVELS 4
@@ -356,8 +355,6 @@ void kvm_exc_entry(void);
356355
int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
357356

358357
extern unsigned long vpid_mask;
359-
extern const unsigned long kvm_exception_size;
360-
extern const unsigned long kvm_enter_guest_size;
361358
extern struct kvm_world_switch *kvm_loongarch_ops;
362359

363360
#define SW_GCSR (1 << 0)

arch/loongarch/kvm/Makefile

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,12 @@ include $(srctree)/virt/kvm/Makefile.kvm
77

88
obj-$(CONFIG_KVM) += kvm.o
99

10+
obj-y += switch.o
11+
1012
kvm-y += exit.o
1113
kvm-y += interrupt.o
1214
kvm-y += main.o
1315
kvm-y += mmu.o
14-
kvm-y += switch.o
1516
kvm-y += timer.o
1617
kvm-y += tlb.o
1718
kvm-y += vcpu.o

arch/loongarch/kvm/main.c

Lines changed: 3 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -348,8 +348,7 @@ void kvm_arch_disable_virtualization_cpu(void)
348348

349349
static int kvm_loongarch_env_init(void)
350350
{
351-
int cpu, order, ret;
352-
void *addr;
351+
int cpu, ret;
353352
struct kvm_context *context;
354353

355354
vmcs = alloc_percpu(struct kvm_context);
@@ -365,30 +364,8 @@ static int kvm_loongarch_env_init(void)
365364
return -ENOMEM;
366365
}
367366

368-
/*
369-
* PGD register is shared between root kernel and kvm hypervisor.
370-
* So world switch entry should be in DMW area rather than TLB area
371-
* to avoid page fault reenter.
372-
*
373-
* In future if hardware pagetable walking is supported, we won't
374-
* need to copy world switch code to DMW area.
375-
*/
376-
order = get_order(kvm_exception_size + kvm_enter_guest_size);
377-
addr = (void *)__get_free_pages(GFP_KERNEL, order);
378-
if (!addr) {
379-
free_percpu(vmcs);
380-
vmcs = NULL;
381-
kfree(kvm_loongarch_ops);
382-
kvm_loongarch_ops = NULL;
383-
return -ENOMEM;
384-
}
385-
386-
memcpy(addr, kvm_exc_entry, kvm_exception_size);
387-
memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size);
388-
flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size);
389-
kvm_loongarch_ops->exc_entry = addr;
390-
kvm_loongarch_ops->enter_guest = addr + kvm_exception_size;
391-
kvm_loongarch_ops->page_order = order;
367+
kvm_loongarch_ops->exc_entry = (void *)kvm_exc_entry;
368+
kvm_loongarch_ops->enter_guest = (void *)kvm_enter_guest;
392369

393370
vpid_mask = read_csr_gstat();
394371
vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
@@ -422,16 +399,10 @@ static int kvm_loongarch_env_init(void)
422399

423400
static void kvm_loongarch_env_exit(void)
424401
{
425-
unsigned long addr;
426-
427402
if (vmcs)
428403
free_percpu(vmcs);
429404

430405
if (kvm_loongarch_ops) {
431-
if (kvm_loongarch_ops->exc_entry) {
432-
addr = (unsigned long)kvm_loongarch_ops->exc_entry;
433-
free_pages(addr, kvm_loongarch_ops->page_order);
434-
}
435406
kfree(kvm_loongarch_ops);
436407
}
437408

arch/loongarch/kvm/switch.S

Lines changed: 15 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,11 @@
44
*/
55

66
#include <linux/linkage.h>
7+
#include <linux/kvm_types.h>
78
#include <asm/asm.h>
89
#include <asm/asmmacro.h>
910
#include <asm/loongarch.h>
11+
#include <asm/page.h>
1012
#include <asm/regdef.h>
1113
#include <asm/unwind_hints.h>
1214

@@ -100,8 +102,13 @@
100102
* - is still in guest mode, such as pgd table/vmid registers etc,
101103
* - will fix with hw page walk enabled in future
102104
* load kvm_vcpu from reserved CSR KVM_VCPU_KS, and save a2 to KVM_TEMP_KS
105+
*
106+
* PGD register is shared between root kernel and kvm hypervisor.
107+
* So world switch entry should be in DMW area rather than TLB area
108+
* to avoid page fault re-enter.
103109
*/
104110
.text
111+
.p2align PAGE_SHIFT
105112
.cfi_sections .debug_frame
106113
SYM_CODE_START(kvm_exc_entry)
107114
UNWIND_HINT_UNDEFINED
@@ -190,8 +197,8 @@ ret_to_host:
190197
kvm_restore_host_gpr a2
191198
jr ra
192199

193-
SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL)
194200
SYM_CODE_END(kvm_exc_entry)
201+
EXPORT_SYMBOL_FOR_KVM(kvm_exc_entry)
195202

196203
/*
197204
* int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
@@ -215,22 +222,24 @@ SYM_FUNC_START(kvm_enter_guest)
215222
/* Save kvm_vcpu to kscratch */
216223
csrwr a1, KVM_VCPU_KS
217224
kvm_switch_to_guest
218-
SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL)
219225
SYM_FUNC_END(kvm_enter_guest)
226+
EXPORT_SYMBOL_FOR_KVM(kvm_enter_guest)
220227

221228
SYM_FUNC_START(kvm_save_fpu)
222229
fpu_save_csr a0 t1
223230
fpu_save_double a0 t1
224231
fpu_save_cc a0 t1 t2
225232
jr ra
226233
SYM_FUNC_END(kvm_save_fpu)
234+
EXPORT_SYMBOL_FOR_KVM(kvm_save_fpu)
227235

228236
SYM_FUNC_START(kvm_restore_fpu)
229237
fpu_restore_double a0 t1
230238
fpu_restore_csr a0 t1 t2
231239
fpu_restore_cc a0 t1 t2
232240
jr ra
233241
SYM_FUNC_END(kvm_restore_fpu)
242+
EXPORT_SYMBOL_FOR_KVM(kvm_restore_fpu)
234243

235244
#ifdef CONFIG_CPU_HAS_LSX
236245
SYM_FUNC_START(kvm_save_lsx)
@@ -239,13 +248,15 @@ SYM_FUNC_START(kvm_save_lsx)
239248
lsx_save_data a0 t1
240249
jr ra
241250
SYM_FUNC_END(kvm_save_lsx)
251+
EXPORT_SYMBOL_FOR_KVM(kvm_save_lsx)
242252

243253
SYM_FUNC_START(kvm_restore_lsx)
244254
lsx_restore_data a0 t1
245255
fpu_restore_cc a0 t1 t2
246256
fpu_restore_csr a0 t1 t2
247257
jr ra
248258
SYM_FUNC_END(kvm_restore_lsx)
259+
EXPORT_SYMBOL_FOR_KVM(kvm_restore_lsx)
249260
#endif
250261

251262
#ifdef CONFIG_CPU_HAS_LASX
@@ -255,17 +266,16 @@ SYM_FUNC_START(kvm_save_lasx)
255266
lasx_save_data a0 t1
256267
jr ra
257268
SYM_FUNC_END(kvm_save_lasx)
269+
EXPORT_SYMBOL_FOR_KVM(kvm_save_lasx)
258270

259271
SYM_FUNC_START(kvm_restore_lasx)
260272
lasx_restore_data a0 t1
261273
fpu_restore_cc a0 t1 t2
262274
fpu_restore_csr a0 t1 t2
263275
jr ra
264276
SYM_FUNC_END(kvm_restore_lasx)
277+
EXPORT_SYMBOL_FOR_KVM(kvm_restore_lasx)
265278
#endif
266-
.section ".rodata"
267-
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
268-
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
269279

270280
#ifdef CONFIG_CPU_HAS_LBT
271281
STACK_FRAME_NON_STANDARD kvm_restore_fpu

0 commit comments

Comments (0)