Skip to content

Commit 6703879

Browse files
shiqingglijinxia
authored and committed
hv: treewide: convert some MACROs to inline functions
MISRA-C requires that each parameter in the MACRO shall be in brackets. In some cases, adding brackets for all of the parameters may not be a perfect solution. For example, it may affect the code readability when there are many parameters used in the MACRO. And duplicated brackets will appear when one MACRO called another MACRO which is using same parameters. This patch convert some MACROs to inline functions to avoid such cases. v1 -> v2: * Remove the unnecessary changes in hypervisor/bsp/uefi/efi/boot.h Tracked-On: #861 Signed-off-by: Shiqing Gao <shiqing.gao@intel.com> Reviewed-by: Junjie Mao <junjie.mao@intel.com>
1 parent 37fd387 commit 6703879

File tree

6 files changed

+119
-79
lines changed

6 files changed

+119
-79
lines changed

hypervisor/arch/x86/guest/ucode.c

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,11 @@ uint64_t get_microcode_version(void)
2323
* According to SDM vol 3 Table 9-7. If data_size field of uCode
2424
* header is zero, the ucode length is 2000
2525
*/
26-
#define UCODE_GET_DATA_SIZE(uhdr) \
27-
((uhdr.data_size != 0U) ? uhdr.data_size : 2000U)
26+
static inline size_t get_ucode_data_size(struct ucode_header *uhdr)
27+
{
28+
return ((uhdr->data_size != 0U) ? uhdr->data_size : 2000U);
29+
}
30+
2831
void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
2932
{
3033
uint64_t gva, fault_addr;
@@ -47,7 +50,7 @@ void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
4750
return;
4851
}
4952

50-
data_size = UCODE_GET_DATA_SIZE(uhdr) + sizeof(struct ucode_header);
53+
data_size = get_ucode_data_size(&uhdr) + sizeof(struct ucode_header);
5154
data_page_num =
5255
((data_size + CPU_PAGE_SIZE) - 1U) >> CPU_PAGE_SHIFT;
5356

hypervisor/arch/x86/io.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -336,9 +336,9 @@ int32_t pio_instr_vmexit_handler(struct vcpu *vcpu)
336336
exit_qual = vcpu->arch_vcpu.exit_qualification;
337337

338338
io_req->type = REQ_PORTIO;
339-
pio_req->size = VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) + 1UL;
340-
pio_req->address = VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual);
341-
if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) == 0UL) {
339+
pio_req->size = vm_exit_io_instruction_size(exit_qual) + 1UL;
340+
pio_req->address = vm_exit_io_instruction_port_number(exit_qual);
341+
if (vm_exit_io_instruction_access_direction(exit_qual) == 0UL) {
342342
pio_req->direction = REQUEST_WRITE;
343343
pio_req->value = (uint32_t)vcpu_get_gpreg(vcpu, CPU_REG_RAX);
344344
} else {

hypervisor/arch/x86/vmexit.c

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -264,23 +264,26 @@ int cpuid_vmexit_handler(struct vcpu *vcpu)
264264
int cr_access_vmexit_handler(struct vcpu *vcpu)
265265
{
266266
uint64_t reg;
267-
int idx = VM_EXIT_CR_ACCESS_REG_IDX(vcpu->arch_vcpu.exit_qualification);
267+
uint32_t idx;
268+
uint64_t exit_qual;
268269

269-
ASSERT((idx>=0) && (idx<=15), "index out of range");
270+
exit_qual = vcpu->arch_vcpu.exit_qualification;
271+
idx = (uint32_t)vm_exit_cr_access_reg_idx(exit_qual);
272+
273+
ASSERT((idx <= 15U), "index out of range");
270274
reg = vcpu_get_gpreg(vcpu, idx);
271275

272-
switch ((VM_EXIT_CR_ACCESS_ACCESS_TYPE
273-
(vcpu->arch_vcpu.exit_qualification) << 4) |
274-
VM_EXIT_CR_ACCESS_CR_NUM(vcpu->arch_vcpu.exit_qualification)) {
275-
case 0x00U:
276+
switch ((vm_exit_cr_access_type(exit_qual) << 4U) |
277+
vm_exit_cr_access_cr_num(exit_qual)) {
278+
case 0x00UL:
276279
/* mov to cr0 */
277280
vcpu_set_cr0(vcpu, reg);
278281
break;
279-
case 0x04U:
282+
case 0x04UL:
280283
/* mov to cr4 */
281284
vcpu_set_cr4(vcpu, reg);
282285
break;
283-
case 0x08U:
286+
case 0x08UL:
284287
/* mov to cr8 */
285288
/* According to SDM 6.15 "Exception and interrupt Reference":
286289
*
@@ -293,7 +296,7 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
293296
}
294297
vlapic_set_cr8(vcpu->arch_vcpu.vlapic, reg);
295298
break;
296-
case 0x18U:
299+
case 0x18UL:
297300
/* mov from cr8 */
298301
reg = vlapic_get_cr8(vcpu->arch_vcpu.vlapic);
299302
vcpu_set_gpreg(vcpu, idx, reg);
@@ -303,11 +306,8 @@ int cr_access_vmexit_handler(struct vcpu *vcpu)
303306
return -EINVAL;
304307
}
305308

306-
TRACE_2L(TRACE_VMEXIT_CR_ACCESS,
307-
VM_EXIT_CR_ACCESS_ACCESS_TYPE
308-
(vcpu->arch_vcpu.exit_qualification),
309-
VM_EXIT_CR_ACCESS_CR_NUM
310-
(vcpu->arch_vcpu.exit_qualification));
309+
TRACE_2L(TRACE_VMEXIT_CR_ACCESS, vm_exit_cr_access_type(exit_qual),
310+
vm_exit_cr_access_cr_num(exit_qual));
311311

312312
return 0;
313313
}

hypervisor/common/hv_main.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ void vcpu_thread(struct vcpu *vcpu)
7070

7171
/* Restore guest TSC_AUX */
7272
if (vcpu->launched) {
73-
CPU_MSR_WRITE(MSR_IA32_TSC_AUX,
73+
cpu_msr_write(MSR_IA32_TSC_AUX,
7474
vcpu->msr_tsc_aux_guest);
7575
}
7676

@@ -87,9 +87,9 @@ void vcpu_thread(struct vcpu *vcpu)
8787

8888
vcpu->arch_vcpu.nrexits++;
8989
/* Save guest TSC_AUX */
90-
CPU_MSR_READ(MSR_IA32_TSC_AUX, &vcpu->msr_tsc_aux_guest);
90+
cpu_msr_read(MSR_IA32_TSC_AUX, &vcpu->msr_tsc_aux_guest);
9191
/* Restore native TSC_AUX */
92-
CPU_MSR_WRITE(MSR_IA32_TSC_AUX, tsc_aux_hyp_cpu);
92+
cpu_msr_write(MSR_IA32_TSC_AUX, tsc_aux_hyp_cpu);
9393

9494
CPU_IRQ_ENABLE();
9595
/* Dispatch handler */

hypervisor/include/arch/x86/cpu.h

Lines changed: 27 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -349,22 +349,22 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
349349
}
350350

351351
/* Read MSR */
352-
#define CPU_MSR_READ(reg, msr_val_ptr) \
353-
{ \
354-
uint32_t msrl, msrh; \
355-
asm volatile (" rdmsr ":"=a"(msrl), \
356-
"=d"(msrh) : "c" (reg)); \
357-
*msr_val_ptr = ((uint64_t)msrh<<32) | msrl; \
352+
static inline void cpu_msr_read(uint32_t reg, uint64_t *msr_val_ptr)
353+
{
354+
uint32_t msrl, msrh;
355+
356+
asm volatile (" rdmsr ":"=a"(msrl), "=d"(msrh) : "c" (reg));
357+
*msr_val_ptr = ((uint64_t)msrh << 32U) | msrl;
358358
}
359359

360360
/* Write MSR */
361-
#define CPU_MSR_WRITE(reg, msr_val) \
362-
{ \
363-
uint32_t msrl, msrh; \
364-
msrl = (uint32_t)msr_val; \
365-
msrh = (uint32_t)(msr_val >> 32); \
366-
asm volatile (" wrmsr " : : "c" (reg), \
367-
"a" (msrl), "d" (msrh)); \
361+
static inline void cpu_msr_write(uint32_t reg, uint64_t msr_val)
362+
{
363+
uint32_t msrl, msrh;
364+
365+
msrl = (uint32_t)msr_val;
366+
msrh = (uint32_t)(msr_val >> 32U);
367+
asm volatile (" wrmsr " : : "c" (reg), "a" (msrl), "d" (msrh));
368368
}
369369

370370
#ifdef CONFIG_PARTITION_MODE
@@ -388,10 +388,11 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
388388
#endif
389389

390390
/* This macro writes the stack pointer. */
391-
#define CPU_SP_WRITE(stack_ptr) \
392-
{ \
393-
uint64_t rsp = (uint64_t)stack_ptr & ~(CPU_STACK_ALIGN - 1UL); \
394-
asm volatile ("movq %0, %%rsp" : : "r"(rsp)); \
391+
static inline void cpu_sp_write(uint64_t *stack_ptr)
392+
{
393+
uint64_t rsp = (uint64_t)stack_ptr & ~(CPU_STACK_ALIGN - 1UL);
394+
395+
asm volatile ("movq %0, %%rsp" : : "r"(rsp));
395396
}
396397

397398
/* Synchronizes all read accesses from memory */
@@ -419,12 +420,13 @@ void wait_sync_change(uint64_t *sync, uint64_t wake_sync);
419420
}
420421

421422
/* Read time-stamp counter / processor ID */
422-
#define CPU_RDTSCP_EXECUTE(timestamp_ptr, cpu_id_ptr) \
423-
{ \
424-
uint32_t tsl, tsh; \
425-
asm volatile ("rdtscp":"=a"(tsl), "=d"(tsh), \
426-
"=c"(*cpu_id_ptr)); \
427-
*timestamp_ptr = ((uint64_t)tsh << 32) | tsl; \
423+
static inline void
424+
cpu_rdtscp_execute(uint64_t *timestamp_ptr, uint32_t *cpu_id_ptr)
425+
{
426+
uint32_t tsl, tsh;
427+
428+
asm volatile ("rdtscp":"=a"(tsl), "=d"(tsh), "=c"(*cpu_id_ptr));
429+
*timestamp_ptr = ((uint64_t)tsh << 32U) | tsl;
428430
}
429431

430432
/* Macro to save rflags register */
@@ -500,21 +502,19 @@ static inline uint64_t cpu_rbp_get(void)
500502
return ret;
501503
}
502504

503-
504-
505505
static inline uint64_t
506506
msr_read(uint32_t reg_num)
507507
{
508508
uint64_t msr_val;
509509

510-
CPU_MSR_READ(reg_num, &msr_val);
510+
cpu_msr_read(reg_num, &msr_val);
511511
return msr_val;
512512
}
513513

514514
static inline void
515515
msr_write(uint32_t reg_num, uint64_t value64)
516516
{
517-
CPU_MSR_WRITE(reg_num, value64);
517+
cpu_msr_write(reg_num, value64);
518518
}
519519

520520
static inline void

hypervisor/include/arch/x86/vmexit.h

Lines changed: 66 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -17,35 +17,72 @@ int vmcall_vmexit_handler(struct vcpu *vcpu);
1717
int cpuid_vmexit_handler(struct vcpu *vcpu);
1818
int cr_access_vmexit_handler(struct vcpu *vcpu);
1919

20-
#define VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, MSB, LSB) \
21-
(exit_qual & (((1UL << (MSB+1U))-1UL) - ((1UL << (LSB))-1UL)))
22-
23-
24-
/* MACROs to access Control-Register Info using exit qualification field */
25-
#define VM_EXIT_CR_ACCESS_CR_NUM(exit_qual) \
26-
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3U, 0U) >> 0U)
27-
#define VM_EXIT_CR_ACCESS_ACCESS_TYPE(exit_qual) \
28-
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5U, 4U) >> 4U)
29-
#define VM_EXIT_CR_ACCESS_LMSW_OP(exit_qual) \
30-
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6U, 6U) >> 6U)
31-
#define VM_EXIT_CR_ACCESS_REG_IDX(exit_qual) \
32-
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 11U, 8U) >> 8U)
33-
#define VM_EXIT_CR_ACCESS_LMSW_SRC_DATE(exit_qual) \
34-
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31U, 16U) >> 16U)
35-
36-
/* MACROs to access IO Access Info using exit qualification field */
37-
#define VM_EXIT_IO_INSTRUCTION_SIZE(exit_qual) \
38-
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 2U, 0U) >> 0U)
39-
#define VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) \
40-
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 3U, 3U) >> 3U)
41-
#define VM_EXIT_IO_INSTRUCTION_IS_STRING(exit_qual) \
42-
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 4U, 4U) >> 4U)
43-
#define VM_EXIT_IO_INSTRUCTION_IS_REP_PREFIXED(exit_qual) \
44-
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 5U, 5U) >> 5U)
45-
#define VM_EXIT_IO_INSTRUCTION_IS_OPERAND_ENCODING(exit_qual) \
46-
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 6U, 6U) >> 6U)
47-
#define VM_EXIT_IO_INSTRUCTION_PORT_NUMBER(exit_qual) \
48-
(VM_EXIT_QUALIFICATION_BIT_MASK(exit_qual, 31U, 16U) >> 16U)
20+
static inline uint64_t
21+
vm_exit_qualification_bit_mask(uint64_t exit_qual, uint32_t msb, uint32_t lsb)
22+
{
23+
return (exit_qual &
24+
(((1UL << (msb + 1U)) - 1UL) - ((1UL << lsb) - 1UL)));
25+
}
26+
27+
/* access Control-Register Info using exit qualification field */
28+
static inline uint64_t vm_exit_cr_access_cr_num(uint64_t exit_qual)
29+
{
30+
return (vm_exit_qualification_bit_mask(exit_qual, 3U, 0U) >> 0U);
31+
}
32+
33+
static inline uint64_t vm_exit_cr_access_type(uint64_t exit_qual)
34+
{
35+
return (vm_exit_qualification_bit_mask(exit_qual, 5U, 4U) >> 4U);
36+
}
37+
38+
static inline uint64_t vm_exit_cr_access_lmsw_op(uint64_t exit_qual)
39+
{
40+
return (vm_exit_qualification_bit_mask(exit_qual, 6U, 6U) >> 6U);
41+
}
42+
43+
static inline uint64_t vm_exit_cr_access_reg_idx(uint64_t exit_qual)
44+
{
45+
return (vm_exit_qualification_bit_mask(exit_qual, 11U, 8U) >> 8U);
46+
}
47+
48+
static inline uint64_t vm_exit_cr_access_lmsw_src_date(uint64_t exit_qual)
49+
{
50+
return (vm_exit_qualification_bit_mask(exit_qual, 31U, 16U) >> 16U);
51+
}
52+
53+
/* access IO Access Info using exit qualification field */
54+
static inline uint64_t vm_exit_io_instruction_size(uint64_t exit_qual)
55+
{
56+
return (vm_exit_qualification_bit_mask(exit_qual, 2U, 0U) >> 0U);
57+
}
58+
59+
static inline uint64_t
60+
vm_exit_io_instruction_access_direction(uint64_t exit_qual)
61+
{
62+
return (vm_exit_qualification_bit_mask(exit_qual, 3U, 3U) >> 3U);
63+
}
64+
65+
static inline uint64_t vm_exit_io_instruction_is_string(uint64_t exit_qual)
66+
{
67+
return (vm_exit_qualification_bit_mask(exit_qual, 4U, 4U) >> 4U);
68+
}
69+
70+
static inline uint64_t
71+
vm_exit_io_instruction_is_rep_prefixed(uint64_t exit_qual)
72+
{
73+
return (vm_exit_qualification_bit_mask(exit_qual, 5U, 5U) >> 5U);
74+
}
75+
76+
static inline uint64_t
77+
vm_exit_io_instruction_is_operand_encoding(uint64_t exit_qual)
78+
{
79+
return (vm_exit_qualification_bit_mask(exit_qual, 6U, 6U) >> 6U);
80+
}
81+
82+
static inline uint64_t vm_exit_io_instruction_port_number(uint64_t exit_qual)
83+
{
84+
return (vm_exit_qualification_bit_mask(exit_qual, 31U, 16U) >> 16U);
85+
}
4986

5087
#ifdef HV_DEBUG
5188
void get_vmexit_profile(char *str_arg, int str_max);

0 commit comments

Comments (0)