
Commit ef19ed8

binbinwu1 authored and Eddie Dong committed
hv: vcpuid: reduce the cyclomatic complexity of function guest_cpuid
This patch reduces the cyclomatic complexity of the function guest_cpuid.

Tracked-On: #2834
Signed-off-by: Binbin Wu <binbin.wu@intel.com>
1 parent f0d0616 commit ef19ed8
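
For readers skimming the diff, the shape of the change is: each leaf-specific case body of guest_cpuid is pulled out into its own helper (guest_cpuid_01h, guest_cpuid_0bh, guest_cpuid_0dh), and the open-coded leaf check is replaced by the new is_percpu_related() predicate, so the dispatcher keeps only a flat switch. The following is a minimal, self-contained sketch of that refactoring pattern; the cpuid_regs struct, the handle_leaf_* helpers, and the register fix-ups are simplified stand-ins for illustration only, not the actual ACRN types or logic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the eax/ebx/ecx/edx output pointers. */
struct cpuid_regs {
	uint32_t eax, ebx, ecx, edx;
};

static inline bool is_percpu_related(uint32_t leaf)
{
	return ((leaf == 0x1U) || (leaf == 0xbU) || (leaf == 0xdU));
}

/* Each per-leaf helper owns the fix-ups for exactly one CPUID leaf. */
static void handle_leaf_01h(struct cpuid_regs *r)
{
	r->ecx |= (1U << 31U);	/* e.g. advertise a hypervisor-present bit */
}

static void handle_leaf_0bh(struct cpuid_regs *r)
{
	r->edx = 0U;		/* e.g. patch the x2APIC ID */
}

static void handle_leaf_0dh(struct cpuid_regs *r)
{
	r->eax = 0U;		/* e.g. mask unsupported XSAVE state */
}

/* The dispatcher stays a flat switch, so its cyclomatic complexity no longer
 * grows with the size of each leaf's handling logic. */
static void dispatch_cpuid(uint32_t leaf, struct cpuid_regs *r)
{
	if (!is_percpu_related(leaf)) {
		return;	/* vm-wide leaves would be served from a cached table instead */
	}

	switch (leaf) {
	case 0x01U:
		handle_leaf_01h(r);
		break;
	case 0x0bU:
		handle_leaf_0bh(r);
		break;
	case 0x0dU:
		handle_leaf_0dh(r);
		break;
	default:
		break;
	}
}

int main(void)
{
	struct cpuid_regs r = { 0U, 0U, 0U, 0U };

	dispatch_cpuid(0x1U, &r);
	printf("ecx after leaf 01h fix-up: 0x%x\n", r.ecx);
	return 0;
}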

File tree

1 file changed: +124 -100 lines


hypervisor/arch/x86/guest/vcpuid.c

Lines changed: 124 additions & 100 deletions
@@ -294,13 +294,132 @@ int32_t set_vcpuid_entries(struct acrn_vm *vm)
 	return result;
 }
 
+static inline bool is_percpu_related(uint32_t leaf)
+{
+	return ((leaf == 0x1U) || (leaf == 0xbU) || (leaf == 0xdU));
+}
+
+static void guest_cpuid_01h(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
+{
+	uint32_t apicid = vlapic_get_apicid(vcpu_vlapic(vcpu));
+
+	cpuid(0x1U, eax, ebx, ecx, edx);
+	/* Patching initial APIC ID */
+	*ebx &= ~APIC_ID_MASK;
+	*ebx |= (apicid << APIC_ID_SHIFT);
+
+	if (vm_hide_mtrr(vcpu->vm)) {
+		/* mask mtrr */
+		*edx &= ~CPUID_EDX_MTRR;
+	}
+
+	/* mask Debug Store feature */
+	*ecx &= ~(CPUID_ECX_DTES64 | CPUID_ECX_DS_CPL);
+
+	/* mask Safer Mode Extension */
+	*ecx &= ~CPUID_ECX_SMX;
+
+	/* mask PDCM: Perfmon and Debug Capability */
+	*ecx &= ~CPUID_ECX_PDCM;
+
+	/* mask SDBG for silicon debug */
+	*ecx &= ~CPUID_ECX_SDBG;
+
+	/* mask pcid */
+	*ecx &= ~CPUID_ECX_PCID;
+
+	/*mask vmx to guest os */
+	*ecx &= ~CPUID_ECX_VMX;
+
+	/* set Hypervisor Present Bit */
+	*ecx |= CPUID_ECX_HV;
+
+	/*no xsave support for guest if it is not enabled on host*/
+	if ((*ecx & CPUID_ECX_OSXSAVE) == 0U) {
+		*ecx &= ~CPUID_ECX_XSAVE;
+	}
+
+	*ecx &= ~CPUID_ECX_OSXSAVE;
+	if ((*ecx & CPUID_ECX_XSAVE) != 0U) {
+		uint64_t cr4;
+		/*read guest CR4*/
+		cr4 = exec_vmread(VMX_GUEST_CR4);
+		if ((cr4 & CR4_OSXSAVE) != 0UL) {
+			*ecx |= CPUID_ECX_OSXSAVE;
+		}
+	}
+
+	/* mask Debug Store feature */
+	*edx &= ~CPUID_EDX_DTES;
+}
+
+static void guest_cpuid_0bh(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
+{
+	uint32_t leaf = 0x0bU;
+	uint32_t subleaf = *ecx;
+
+	/* Patching X2APIC */
+	if (is_lapic_pt(vcpu->vm)) {
+		/* for VM with LAPIC_PT, eg. PRE_LAUNCHED_VM or POST_LAUNCHED_VM with LAPIC_PT*/
+		cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
+	} else if (is_sos_vm(vcpu->vm)) {
+		cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
+	} else {
+		*ecx = subleaf & 0xFFU;
+		*edx = vlapic_get_apicid(vcpu_vlapic(vcpu));
+		/* No HT emulation for UOS */
+		switch (subleaf) {
+		case 0U:
+			*eax = 0U;
+			*ebx = 1U;
+			*ecx |= (1U << 8U);
+			break;
+		case 1U:
+			if (vcpu->vm->hw.created_vcpus == 1U) {
+				*eax = 0U;
+			} else {
+				*eax = (uint32_t)fls32(vcpu->vm->hw.created_vcpus - 1U) + 1U;
+			}
+			*ebx = vcpu->vm->hw.created_vcpus;
+			*ecx |= (2U << 8U);
+			break;
+		default:
+			*eax = 0U;
+			*ebx = 0U;
+			*ecx |= (0U << 8U);
+			break;
+		}
+	}
+}
+
+static void guest_cpuid_0dh(__unused struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
+{
+	uint32_t subleaf = *ecx;
+
+	if (!pcpu_has_cap(X86_FEATURE_OSXSAVE)) {
+		*eax = 0U;
+		*ebx = 0U;
+		*ecx = 0U;
+		*edx = 0U;
+	} else {
+		cpuid_subleaf(0x0dU, subleaf, eax, ebx, ecx, edx);
+		if (subleaf == 0U) {
+			/* SDM Vol.1 17-2, On processors that do not support Intel MPX,
+			 * CPUID.(EAX=0DH,ECX=0):EAX[3] and
+			 * CPUID.(EAX=0DH,ECX=0):EAX[4] will both be 0 */
+			*eax &= ~ CPUID_EAX_XCR0_BNDREGS;
+			*eax &= ~ CPUID_EAX_XCR0_BNDCSR;
+		}
+	}
+}
+
 void guest_cpuid(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
 {
 	uint32_t leaf = *eax;
 	uint32_t subleaf = *ecx;
 
 	/* vm related */
-	if ((leaf != 0x1U) && (leaf != 0xbU) && (leaf != 0xdU)) {
+	if (!is_percpu_related(leaf)) {
 		const struct vcpuid_entry *entry = find_vcpuid_entry(vcpu, leaf, subleaf);
 
 		if (entry != NULL) {
@@ -318,110 +437,15 @@ void guest_cpuid(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t
 	/* percpu related */
 	switch (leaf) {
 	case 0x01U:
-	{
-		cpuid(leaf, eax, ebx, ecx, edx);
-		uint32_t apicid = vlapic_get_apicid(vcpu_vlapic(vcpu));
-		/* Patching initial APIC ID */
-		*ebx &= ~APIC_ID_MASK;
-		*ebx |= (apicid << APIC_ID_SHIFT);
-
-		if (vm_hide_mtrr(vcpu->vm)) {
-			/* mask mtrr */
-			*edx &= ~CPUID_EDX_MTRR;
-		}
-
-		/* mask Debug Store feature */
-		*ecx &= ~(CPUID_ECX_DTES64 | CPUID_ECX_DS_CPL);
-
-		/* mask Safer Mode Extension */
-		*ecx &= ~CPUID_ECX_SMX;
-
-		/* mask PDCM: Perfmon and Debug Capability */
-		*ecx &= ~CPUID_ECX_PDCM;
-
-		/* mask SDBG for silicon debug */
-		*ecx &= ~CPUID_ECX_SDBG;
-
-		/* mask pcid */
-		*ecx &= ~CPUID_ECX_PCID;
-
-		/*mask vmx to guest os */
-		*ecx &= ~CPUID_ECX_VMX;
-
-		/* set Hypervisor Present Bit */
-		*ecx |= CPUID_ECX_HV;
-
-		/*no xsave support for guest if it is not enabled on host*/
-		if ((*ecx & CPUID_ECX_OSXSAVE) == 0U) {
-			*ecx &= ~CPUID_ECX_XSAVE;
-		}
-
-		*ecx &= ~CPUID_ECX_OSXSAVE;
-		if ((*ecx & CPUID_ECX_XSAVE) != 0U) {
-			uint64_t cr4;
-			/*read guest CR4*/
-			cr4 = exec_vmread(VMX_GUEST_CR4);
-			if ((cr4 & CR4_OSXSAVE) != 0UL) {
-				*ecx |= CPUID_ECX_OSXSAVE;
-			}
-		}
-
-		/* mask Debug Store feature */
-		*edx &= ~CPUID_EDX_DTES;
-
+		guest_cpuid_01h(vcpu, eax, ebx, ecx, edx);
 		break;
-	}
+
 	case 0x0bU:
-		/* Patching X2APIC */
-		if (is_lapic_pt(vcpu->vm)) {
-			/* for VM with LAPIC_PT, eg. PRE_LAUNCHED_VM or POST_LAUNCHED_VM with LAPIC_PT*/
-			cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
-		} else if (is_sos_vm(vcpu->vm)) {
-			cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
-		} else {
-			*ecx = subleaf & 0xFFU;
-			*edx = vlapic_get_apicid(vcpu_vlapic(vcpu));
-			/* No HT emulation for UOS */
-			switch (subleaf) {
-			case 0U:
-				*eax = 0U;
-				*ebx = 1U;
-				*ecx |= (1U << 8U);
-				break;
-			case 1U:
-				if (vcpu->vm->hw.created_vcpus == 1U) {
-					*eax = 0U;
-				} else {
-					*eax = (uint32_t)fls32(vcpu->vm->hw.created_vcpus - 1U) + 1U;
-				}
-				*ebx = vcpu->vm->hw.created_vcpus;
-				*ecx |= (2U << 8U);
-				break;
-			default:
-				*eax = 0U;
-				*ebx = 0U;
-				*ecx |= (0U << 8U);
-				break;
-			}
-		}
+		guest_cpuid_0bh(vcpu, eax, ebx, ecx, edx);
 		break;
 
 	case 0x0dU:
-		if (!pcpu_has_cap(X86_FEATURE_OSXSAVE)) {
-			*eax = 0U;
-			*ebx = 0U;
-			*ecx = 0U;
-			*edx = 0U;
-		} else {
-			cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
-			if (subleaf == 0U) {
-				/* SDM Vol.1 17-2, On processors that do not support Intel MPX,
-				 * CPUID.(EAX=0DH,ECX=0):EAX[3] and
-				 * CPUID.(EAX=0DH,ECX=0):EAX[4] will both be 0 */
-				*eax &= ~ CPUID_EAX_XCR0_BNDREGS;
-				*eax &= ~ CPUID_EAX_XCR0_BNDCSR;
-			}
-		}
+		guest_cpuid_0dh(vcpu, eax, ebx, ecx, edx);
 		break;
 
 	default:

0 commit comments