Commit 7bb09f7
Shawnshh authored and lijinxia committed
fix "Procedure is not pure assembler"
MISRA C requires that assembly code comply with the rule below: the functionality of the assembly code should match the name of the enclosing function. Where it does not, encapsulate the assembly code in a function whose name describes that functionality.

V1->V2:
1. Remove the dead code.
2. Update the detailed comments.

V2->V3:
1. Replace the macro name with upper case.
2. Remove the typedef and rename the struct "_descriptor_table_" to "descriptor_table".

Tracked-On: #861
Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 91fb441 commit 7bb09f7
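The refactoring pattern applied throughout this commit is sketched below for illustration. The wrapper hlt_cpu() is taken from this commit; the surrounding idle_loop() caller is hypothetical, not code from the tree. A raw asm statement is moved into a static inline helper whose name states what the instruction does, so each remaining procedure is either pure C or a pure, well-named assembly wrapper.

	/* Before: the asm statement sits anonymously inside a C procedure,
	 * which MISRA C flags as "Procedure is not pure assembler". */
	static void idle_loop(void)	/* hypothetical caller */
	{
		asm volatile ("hlt");
	}

	/* After: the instruction is wrapped in a helper named for its effect. */
	static inline void hlt_cpu(void)
	{
		asm volatile ("hlt");
	}

	static void idle_loop(void)
	{
		hlt_cpu();	/* intent is now visible at the call site */
	}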

9 files changed, +96 -59 lines


hypervisor/arch/x86/cpu.c (1 addition, 1 deletion)

@@ -724,7 +724,7 @@ void cpu_dead(uint16_t pcpu_id)

 	/* Halt the CPU */
 	do {
-		asm volatile ("hlt");
+		hlt_cpu();
 	} while (halt != 0);
 }

hypervisor/arch/x86/gdt.c (6 additions, 1 deletion)

@@ -23,6 +23,11 @@ static void set_tss_desc(struct tss_64_descriptor *desc,
 	desc->high32_value = u2 | (type << 8U) | 0x8000U | u3;
 }

+static inline void load_gdt(struct host_gdt_descriptor *gdtr)
+{
+	asm volatile ("lgdt %0" ::"m"(*gdtr));
+}
+
 void load_gdtr_and_tr(void)
 {
 	struct host_gdt *gdt = &get_cpu_var(gdt);
@@ -48,7 +53,7 @@ void load_gdtr_and_tr(void)
 	gdtr.len = sizeof(struct host_gdt) - 1U;
 	gdtr.gdt = gdt;

-	asm volatile ("lgdt %0" ::"m"(gdtr));
+	load_gdt(&gdtr);

 	CPU_LTR_EXECUTE(HOST_GDT_RING0_CPU_TSS_SEL);
 }

hypervisor/arch/x86/timer.c (1 addition, 1 deletion)

@@ -123,7 +123,7 @@ static void init_tsc_deadline_timer(void)
 	val = VECTOR_TIMER;
 	val |= APIC_LVTT_TM_TSCDLT; /* TSC deadline and unmask */
 	msr_write(MSR_IA32_EXT_APIC_LVT_TIMER, val);
-	asm volatile("mfence" : : : "memory");
+	cpu_memory_barrier();

 	/* disarm timer */
 	msr_write(MSR_IA32_TSC_DEADLINE, 0UL);

hypervisor/arch/x86/trusty.c (13 additions, 3 deletions)

@@ -181,6 +181,17 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)

 }

+static inline void save_fxstore_guest_area(struct ext_context *ext_ctx)
+{
+	asm volatile("fxsave (%0)"
+		: : "r" (ext_ctx->fxstore_guest_area) : "memory");
+}
+
+static inline void rstor_fxstore_guest_area(const struct ext_context *ext_ctx)
+{
+	asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
+}
+
 static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
 {
 	/* cache on-demand run_context for efer/rflags/rsp/rip */
@@ -231,8 +242,7 @@ static void save_world_ctx(struct acrn_vcpu *vcpu, struct ext_context *ext_ctx)
 	ext_ctx->ia32_kernel_gs_base = msr_read(MSR_IA32_KERNEL_GS_BASE);

 	/* FX area */
-	asm volatile("fxsave (%0)"
-		: : "r" (ext_ctx->fxstore_guest_area) : "memory");
+	save_fxstore_guest_area(ext_ctx);
 }

 static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext_ctx)
@@ -279,7 +289,7 @@ static void load_world_ctx(struct acrn_vcpu *vcpu, const struct ext_context *ext_ctx)
 	msr_write(MSR_IA32_KERNEL_GS_BASE, ext_ctx->ia32_kernel_gs_base);

 	/* FX area */
-	asm volatile("fxrstor (%0)" : : "r" (ext_ctx->fxstore_guest_area));
+	rstor_fxstore_guest_area(ext_ctx);
 }

 static void copy_smc_param(const struct run_context *prev_ctx,

hypervisor/arch/x86/vmx.c (36 additions, 32 deletions)

@@ -6,6 +6,7 @@

 #include <hypervisor.h>
 #include <vm0_boot.h>
+#include <cpu.h>
 #ifdef CONFIG_EFI_STUB
 extern struct efi_context* efi_ctx;
 #endif
@@ -51,19 +52,6 @@ bool is_vmx_disabled(void)
  */
 static inline void exec_vmxon(void *addr)
 {
-	uint64_t tmp64;
-
-	/* Read Feature ControL MSR */
-	tmp64 = msr_read(MSR_IA32_FEATURE_CONTROL);
-
-	/* Check if feature control is locked */
-	if ((tmp64 & MSR_IA32_FEATURE_CONTROL_LOCK) == 0U) {
-		/* Lock and enable VMX support */
-		tmp64 |= (MSR_IA32_FEATURE_CONTROL_LOCK |
-			MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX);
-		msr_write(MSR_IA32_FEATURE_CONTROL, tmp64);
-	}
-
 	/* Turn VMX on, pre-conditions can avoid VMfailInvalid
 	 * here no need check RFLAGS since it will generate #GP or #UD
 	 * except VMsuccess. SDM 30.3
@@ -98,6 +86,17 @@ void exec_vmxon_instr(uint16_t pcpu_id)
 	CPU_CR_READ(cr4, &tmp64);
 	CPU_CR_WRITE(cr4, tmp64 | CR4_VMXE);

+	/* Read Feature ControL MSR */
+	tmp64 = msr_read(MSR_IA32_FEATURE_CONTROL);
+
+	/* Check if feature control is locked */
+	if ((tmp64 & MSR_IA32_FEATURE_CONTROL_LOCK) == 0U) {
+		/* Lock and enable VMX support */
+		tmp64 |= (MSR_IA32_FEATURE_CONTROL_LOCK |
+			MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX);
+		msr_write(MSR_IA32_FEATURE_CONTROL, tmp64);
+	}
+
 	/* Turn ON VMX */
 	vmxon_region_pa = hva2hpa(vmxon_region_va);
 	exec_vmxon(&vmxon_region_pa);
@@ -106,6 +105,11 @@ void exec_vmxon_instr(uint16_t pcpu_id)
 	exec_vmptrld(&vmcs_pa);
 }

+static inline void exec_vmxoff(void)
+{
+	asm volatile ("vmxoff" : : : "memory");
+}
+
 void vmx_off(uint16_t pcpu_id)
 {

@@ -115,7 +119,7 @@ void vmx_off(uint16_t pcpu_id)
 	vmcs_pa = hva2hpa(vcpu->arch.vmcs);
 	exec_vmclear((void *)&vmcs_pa);

-	asm volatile ("vmxoff" : : : "memory");
+	exec_vmxoff();
 }

 /**
@@ -605,8 +609,8 @@ static void init_host_state(void)
 	uint64_t value64;
 	uint64_t value;
 	uint64_t tss_addr;
-	descriptor_table gdtb = {0U, 0UL};
-	descriptor_table idtb = {0U, 0UL};
+	uint64_t gdt_base;
+	uint64_t idt_base;

 	pr_dbg("*********************");
 	pr_dbg("Initialize host state");
@@ -619,27 +623,27 @@ static void init_host_state(void)
 	 * GS), * Task Register (TR), * Local Descriptor Table Register (LDTR)
 	 *
 	 ***************************************************/
-	asm volatile ("movw %%es, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(es, value16);
 	exec_vmwrite16(VMX_HOST_ES_SEL, value16);
 	pr_dbg("VMX_HOST_ES_SEL: 0x%hu ", value16);

-	asm volatile ("movw %%cs, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(cs, value16);
 	exec_vmwrite16(VMX_HOST_CS_SEL, value16);
 	pr_dbg("VMX_HOST_CS_SEL: 0x%hu ", value16);

-	asm volatile ("movw %%ss, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(ss, value16);
 	exec_vmwrite16(VMX_HOST_SS_SEL, value16);
 	pr_dbg("VMX_HOST_SS_SEL: 0x%hu ", value16);

-	asm volatile ("movw %%ds, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(ds, value16);
 	exec_vmwrite16(VMX_HOST_DS_SEL, value16);
 	pr_dbg("VMX_HOST_DS_SEL: 0x%hu ", value16);

-	asm volatile ("movw %%fs, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(fs, value16);
 	exec_vmwrite16(VMX_HOST_FS_SEL, value16);
 	pr_dbg("VMX_HOST_FS_SEL: 0x%hu ", value16);

-	asm volatile ("movw %%gs, %%ax":"=a" (value16));
+	CPU_SEG_WRITE(gs, value16);
 	exec_vmwrite16(VMX_HOST_GS_SEL, value16);
 	pr_dbg("VMX_HOST_GS_SEL: 0x%hu ", value16);

@@ -654,30 +658,30 @@ static void init_host_state(void)

 	/* TODO: Should guest GDTB point to host GDTB ? */
 	/* Obtain the current global descriptor table base */
-	asm volatile ("sgdt %0":"=m"(gdtb)::"memory");
+	gdt_base = sgdt();

-	if (((gdtb.base >> 47U) & 0x1UL) != 0UL) {
-		gdtb.base |= 0xffff000000000000UL;
+	if (((gdt_base >> 47U) & 0x1UL) != 0UL) {
+		gdt_base |= 0xffff000000000000UL;
 	}

 	/* Set up the guest and host GDTB base fields with current GDTB base */
-	exec_vmwrite(VMX_HOST_GDTR_BASE, gdtb.base);
-	pr_dbg("VMX_HOST_GDTR_BASE: 0x%x ", gdtb.base);
+	exec_vmwrite(VMX_HOST_GDTR_BASE, gdt_base);
+	pr_dbg("VMX_HOST_GDTR_BASE: 0x%x ", gdt_base);

 	tss_addr = hva2hpa((void *)&get_cpu_var(tss));
 	/* Set up host TR base fields */
 	exec_vmwrite(VMX_HOST_TR_BASE, tss_addr);
 	pr_dbg("VMX_HOST_TR_BASE: 0x%016llx ", tss_addr);

 	/* Obtain the current interrupt descriptor table base */
-	asm volatile ("sidt %0":"=m"(idtb)::"memory");
+	idt_base = sidt();
 	/* base */
-	if (((idtb.base >> 47U) & 0x1UL) != 0UL) {
-		idtb.base |= 0xffff000000000000UL;
+	if (((idt_base >> 47U) & 0x1UL) != 0UL) {
+		idt_base |= 0xffff000000000000UL;
 	}

-	exec_vmwrite(VMX_HOST_IDTR_BASE, idtb.base);
-	pr_dbg("VMX_HOST_IDTR_BASE: 0x%x ", idtb.base);
+	exec_vmwrite(VMX_HOST_IDTR_BASE, idt_base);
+	pr_dbg("VMX_HOST_IDTR_BASE: 0x%x ", idt_base);

 	/**************************************************/
 	/* 64-bit fields */
hypervisor/arch/x86/vtd.c (1 addition, 1 deletion)

@@ -264,7 +264,7 @@ dmar_wait_completion(const struct dmar_drhd_rt *dmar_uint, uint32_t offset,
 		}
 		ASSERT(((rdtsc() - start) < CYCLES_PER_MS),
			"DMAR OP Timeout!");
-		asm volatile ("pause" ::: "memory");
+		pause_cpu();
 	}
 }

hypervisor/debug/dump.c (1 addition, 1 deletion)

@@ -243,7 +243,7 @@ void asm_assert(int32_t line, const char *file, const char *txt)
 	show_host_call_trace(rsp, rbp, pcpu_id);
 	dump_guest_context(pcpu_id);
 	do {
-		asm volatile ("pause" ::: "memory");
+		pause_cpu();
 	} while (1);
 }

hypervisor/include/arch/x86/cpu.h (37 additions, 15 deletions)

@@ -264,6 +264,11 @@ extern spinlock_t trampoline_spinlock;
 */
 #define BROADCAST_CPU_ID 0xfffeU

+struct descriptor_table {
+	uint16_t limit;
+	uint64_t base;
+} __attribute__((packed));
+
 /* CPU states defined */
 enum pcpu_boot_state {
 	PCPU_STATE_RESET = 0U,
@@ -326,6 +331,11 @@ void stop_cpus(void);
 void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
 void cpu_l1d_flush(void);

+#define CPU_SEG_WRITE(seg, value16) \
+{ \
+	asm volatile ("mov %%" STRINGIFY(seg) ", %%ax": "=a" (value16)); \
+}
+
 /* Read control register */
 #define CPU_CR_READ(cr, result_ptr) \
 { \
@@ -341,6 +351,20 @@ void cpu_l1d_flush(void);
		: "r"(value)); \
 }

+static inline uint64_t sgdt(void)
+{
+	struct descriptor_table gdtb = {0U, 0UL};
+	asm volatile ("sgdt %0":"=m"(gdtb)::"memory");
+	return gdtb.base;
+}
+
+static inline uint64_t sidt(void)
+{
+	struct descriptor_table idtb = {0U, 0UL};
+	asm volatile ("sidt %0":"=m"(idtb)::"memory");
+	return idtb.base;
+}
+
 /* Read MSR */
 static inline void cpu_msr_read(uint32_t reg, uint64_t *msr_val_ptr)
 {
@@ -360,6 +384,16 @@ static inline void cpu_msr_write(uint32_t reg, uint64_t msr_val)
 	asm volatile (" wrmsr " : : "c" (reg), "a" (msrl), "d" (msrh));
 }

+static inline void pause_cpu(void)
+{
+	asm volatile ("pause" ::: "memory");
+}
+
+static inline void hlt_cpu(void)
+{
+	asm volatile ("hlt");
+}
+
 #ifdef CONFIG_PARTITION_MODE
 #define CPU_IRQ_DISABLE()
 #else
@@ -388,22 +422,10 @@ static inline void cpu_sp_write(uint64_t *stack_ptr)
 	asm volatile ("movq %0, %%rsp" : : "r"(rsp));
 }

-/* Synchronizes all read accesses from memory */
-#define CPU_MEMORY_READ_BARRIER() \
-{ \
-	asm volatile ("lfence\n" : : : "memory"); \
-}
-
-/* Synchronizes all write accesses to memory */
-#define CPU_MEMORY_WRITE_BARRIER() \
-{ \
-	asm volatile ("sfence\n" : : : "memory"); \
-}
-
 /* Synchronizes all read and write accesses to/from memory */
-#define CPU_MEMORY_BARRIER() \
-{ \
-	asm volatile ("mfence\n" : : : "memory"); \
+static inline void cpu_memory_barrier(void)
+{
+	asm volatile ("mfence\n" : : : "memory");
 }

 /* Write the task register */
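A note on the new sgdt()/sidt() helpers above: SGDT and SIDT store a 10-byte pseudo-descriptor (a 16-bit limit followed by a 64-bit base in IA-32e mode), which is why struct descriptor_table is declared packed; without __attribute__((packed)) the compiler would insert padding between limit and base, and the stored base would land partly in the padding. A minimal usage sketch follows; it mirrors the canonicalization step in the vmx.c hunk above, and the variable name is illustrative:

	uint64_t base = sgdt();	/* current GDT base address */

	/* Sign-extend bit 47 so the address stays canonical on x86-64. */
	if (((base >> 47U) & 0x1UL) != 0UL) {
		base |= 0xffff000000000000UL;
	}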

hypervisor/include/arch/x86/vmx.h (0 additions, 4 deletions)

@@ -477,10 +477,6 @@ static inline bool cpu_has_vmx_unrestricted_guest_cap(void)
		!= 0UL);
 }

-typedef struct _descriptor_table_{
-	uint16_t limit;
-	uint64_t base;
-}__attribute__((packed)) descriptor_table;
 #endif /* ASSEMBLER */

 #endif /* VMX_H_ */
