From 3e0204044db34760573e7220adf5737b0078b200 Mon Sep 17 00:00:00 2001
From: Ivan Shcherbakov
Date: Tue, 22 Feb 2022 21:15:41 -0800
Subject: [PATCH 01/17] whpx: Fixed reporting of the CPU context to GDB for 64-bit

Make sure that pausing the VM while in 64-bit mode will set the
HF_CS64_MASK flag in env->hflags (see x86_update_hflags() in
target/i386/cpu.c). Without it, the code in gdbstub.c would only use the
32-bit register values when debugging 64-bit targets, making debugging
effectively impossible.

Signed-off-by: Ivan Shcherbakov
Message-Id: <00f701d82874$68b02000$3a106000$@sysprogs.com>
Signed-off-by: Paolo Bonzini
---
 target/i386/whpx/whpx-all.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index ef896da0a2115..edd4fafbdf2c6 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -604,6 +604,8 @@ static void whpx_get_registers(CPUState *cpu)
         whpx_apic_get(x86_cpu->apic_state);
     }
 
+    x86_update_hflags(env);
+
     return;
 }

From 241a50e6f5f9af20303bc4253759497dd3ebc337 Mon Sep 17 00:00:00 2001
From: Ivan Shcherbakov
Date: Tue, 22 Feb 2022 21:18:00 -0800
Subject: [PATCH 02/17] whpx: Fixed incorrect CR8/TPR synchronization

This fixes the following error triggered when stopping and resuming a
64-bit Linux kernel via gdb:

  qemu-system-x86_64.exe: WHPX: Failed to set virtual processor context, hr=c0350005

The previous logic for synchronizing the values did not take into account
that the lower 4 bits of the CR8 register, containing the priority level,
map to bits 7:4 of the APIC.TPR register (see section 10.8.6.1 of Volume 3
of Intel 64 and IA-32 Architectures Software Developer's Manual).

This caused WHvSetVirtualProcessorRegisters() to fail with an error,
effectively preventing GDB from changing the guest context.

Signed-off-by: Ivan Shcherbakov
Message-Id: <010b01d82874$bb4ef160$31ecd420$@sysprogs.com>
Signed-off-by: Paolo Bonzini
---
 target/i386/whpx/whpx-all.c | 28 +++++++++++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index edd4fafbdf2c6..63203730bcd04 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -256,6 +256,21 @@ static int whpx_set_tsc(CPUState *cpu)
     return 0;
 }
 
+/*
+ * The CR8 register in the CPU is mapped to the TPR register of the APIC,
+ * however, they use a slightly different encoding. Specifically:
+ *
+ * APIC.TPR[bits 7:4] = CR8[bits 3:0]
+ *
+ * This mechanism is described in section 10.8.6.1 of Volume 3 of Intel 64
+ * and IA-32 Architectures Software Developer's Manual.
+ */
+
+static uint64_t whpx_apic_tpr_to_cr8(uint64_t tpr)
+{
+    return tpr >> 4;
+}
+
 static void whpx_set_registers(CPUState *cpu, int level)
 {
     struct whpx_state *whpx = &whpx_global;
@@ -284,7 +299,7 @@ static void whpx_set_registers(CPUState *cpu, int level)
     v86 = (env->eflags & VM_MASK);
     r86 = !(env->cr[0] & CR0_PE_MASK);
 
-    vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
+    vcpu->tpr = whpx_apic_tpr_to_cr8(cpu_get_apic_tpr(x86_cpu->apic_state));
     vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);
 
     idx = 0;
@@ -475,6 +490,17 @@ static void whpx_get_registers(CPUState *cpu)
                      hr);
     }
 
+    if (whpx_apic_in_platform()) {
+        /*
+         * Fetch the TPR value from the emulated APIC. It may get overwritten
+         * below with the value from CR8 returned by
+         * WHvGetVirtualProcessorRegisters().
+         */
+        whpx_apic_get(x86_cpu->apic_state);
+        vcpu->tpr = whpx_apic_tpr_to_cr8(
+            cpu_get_apic_tpr(x86_cpu->apic_state));
+    }
+
     idx = 0;
 
     /* Indexes for first 16 registers match between HV and QEMU definitions */

From a9fda1f9867df9caf97fad846605ad25251596ed Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Mon, 21 Feb 2022 15:53:15 +0100
Subject: [PATCH 03/17] i386: Add Icelake-Server-v6 CPU model with 5-level EPT support

Windows 11 with WSL2 enabled (Hyper-V) fails to boot with Icelake-Server
{-v5} CPU model but boots well with '-cpu host'. Apparently, it expects
5-level paging and 5-level EPT support to come as a pair, but QEMU's
Icelake-Server CPU model lacks the latter. Introduce 'Icelake-Server-v6'
CPU model with 'vmx-page-walk-5' enabled by default.

Signed-off-by: Vitaly Kuznetsov
Message-Id: <20220221145316.576138-1-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini
---
 target/i386/cpu.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 6c7ef1099b202..016de1627ba45 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -3506,6 +3506,14 @@ static const X86CPUDefinition builtin_x86_defs[] = {
                     { /* end of list */ }
                 },
             },
+            {
+                .version = 6,
+                .note = "5-level EPT",
+                .props = (PropValue[]) {
+                    { "vmx-page-walk-5", "on" },
+                    { /* end of list */ }
+                },
+            },
             { /* end of list */ }
         }
     },

From 1c886fa6344fb7eba6baf136a916e2a286eb67e2 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Mon, 21 Feb 2022 15:53:16 +0100
Subject: [PATCH 04/17] vmxcap: Add 5-level EPT bit

5-level EPT is present in Icelake Server CPUs and is supported by QEMU
('vmx-page-walk-5').

Signed-off-by: Vitaly Kuznetsov
Message-Id: <20220221145316.576138-2-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini
---
 scripts/kvm/vmxcap | 1 +
 1 file changed, 1 insertion(+)

diff --git a/scripts/kvm/vmxcap b/scripts/kvm/vmxcap
index 6fe66d5f5753a..f140040104bfb 100755
--- a/scripts/kvm/vmxcap
+++ b/scripts/kvm/vmxcap
@@ -249,6 +249,7 @@ controls = [
         bits = {
             0: 'Execute-only EPT translations',
             6: 'Page-walk length 4',
+            7: 'Page-walk length 5',
             8: 'Paging-structure memory type UC',
             14: 'Paging-structure memory type WB',
             16: '2MB EPT pages',

From 201cf200fe4615ac4295193a8dfe418e152290c6 Mon Sep 17 00:00:00 2001
From: Maxim Levitsky
Date: Wed, 23 Feb 2022 13:58:24 +0200
Subject: [PATCH 05/17] KVM: SVM: always set MSR_AMD64_TSC_RATIO to default value

Even when the feature is not supported in guest CPUID, still set the MSR
to the default value, which will be the only value KVM will accept in
this case.

Signed-off-by: Maxim Levitsky
Message-Id: <20220223115824.319821-1-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini
---
 target/i386/cpu.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 016de1627ba45..030d9e1b10310 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5938,9 +5938,7 @@ static void x86_cpu_reset(DeviceState *dev)
 
     x86_cpu_set_sgxlepubkeyhash(env);
 
-    if (env->features[FEAT_SVM] & CPUID_SVM_TSCSCALE) {
-        env->amd_tsc_scale_msr = MSR_AMD64_TSC_RATIO_DEFAULT;
-    }
+    env->amd_tsc_scale_msr = MSR_AMD64_TSC_RATIO_DEFAULT;
 #endif
 }
 

From d7d26bdef221030822268831d5f0a3ae485bd068 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Tue, 22 Feb 2022 17:58:11 +0100
Subject: [PATCH 06/17] linux-headers: include missing changes from 5.17

Signed-off-by: Paolo Bonzini
---
 linux-headers/asm-x86/kvm.h | 3 +++
 linux-headers/linux/kvm.h   | 4 ++++
 2 files changed, 7 insertions(+)

diff --git a/linux-headers/asm-x86/kvm.h
b/linux-headers/asm-x86/kvm.h index 2da3316bb5595..bf6e96011dfed 100644 --- a/linux-headers/asm-x86/kvm.h +++ b/linux-headers/asm-x86/kvm.h @@ -452,6 +452,9 @@ struct kvm_sync_regs { #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001 +/* attributes for system fd (group 0) */ +#define KVM_X86_XCOMP_GUEST_SUPP 0 + struct kvm_vmx_nested_state_data { __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index 00af3bc333a55..d232feaae9729 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -1133,6 +1133,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206 #define KVM_CAP_VM_GPA_BITS 207 #define KVM_CAP_XSAVE2 208 +#define KVM_CAP_SYS_ATTRIBUTES 209 #ifdef KVM_CAP_IRQ_ROUTING @@ -2047,4 +2048,7 @@ struct kvm_stats_desc { #define KVM_GET_STATS_FD _IO(KVMIO, 0xce) +/* Available with KVM_CAP_XSAVE2 */ +#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave) + #endif /* __LINUX_KVM_H */ From be2f9e61478e4dfbe0437b4201c769de5041039f Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 16 Feb 2022 22:04:27 -0800 Subject: [PATCH 07/17] x86: Fix the 64-byte boundary enumeration for extended state The extended state subleaves (EAX=0Dh, ECX=n, n>1).ECX[1] indicate whether the extended state component locates on the next 64-byte boundary following the preceding state component when the compacted format of an XSAVE area is used. Right now, they are all zero because no supported component needed the bit to be set, but the upcoming AMX feature will use it. Fix the subleaves value according to KVM's supported cpuid. Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220217060434.52460-2-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- target/i386/cpu.c | 1 + target/i386/cpu.h | 6 ++++++ target/i386/kvm/kvm-cpu.c | 1 + 3 files changed, 8 insertions(+) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 030d9e1b10310..258f128def163 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -5496,6 +5496,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, const ExtSaveArea *esa = &x86_ext_save_areas[count]; *eax = esa->size; *ebx = esa->offset; + *ecx = esa->ecx & ESA_FEATURE_ALIGN64_MASK; } } break; diff --git a/target/i386/cpu.h b/target/i386/cpu.h index e69ab5dd783c1..7bd9d58505863 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -550,6 +550,11 @@ typedef enum X86Seg { #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) +#define ESA_FEATURE_ALIGN64_BIT 1 + +#define ESA_FEATURE_ALIGN64_MASK (1U << ESA_FEATURE_ALIGN64_BIT) + + /* CPUID feature words */ typedef enum FeatureWord { FEAT_1_EDX, /* CPUID[1].EDX */ @@ -1356,6 +1361,7 @@ QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8); typedef struct ExtSaveArea { uint32_t feature, bits; uint32_t offset, size; + uint32_t ecx; } ExtSaveArea; #define XSAVE_STATE_AREA_COUNT (XSTATE_PKRU_BIT + 1) diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c index d95028018e892..ce27d3b1dff2a 100644 --- a/target/i386/kvm/kvm-cpu.c +++ b/target/i386/kvm/kvm-cpu.c @@ -104,6 +104,7 @@ static void kvm_cpu_xsave_init(void) if (sz != 0) { assert(esa->size == sz); esa->offset = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EBX); + esa->ecx = kvm_arch_get_supported_cpuid(s, 0xd, i, R_ECX); } } } From 179fc426e4537c5e6f676f490b0096db716bdf4f Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 16 Feb 2022 22:04:28 
-0800 Subject: [PATCH 08/17] x86: Add AMX XTILECFG and XTILEDATA components The AMX TILECFG register and the TMMx tile data registers are saved/restored via XSAVE, respectively in state component 17 (64 bytes) and state component 18 (8192 bytes). Add AMX feature bits to x86_ext_save_areas array to set up AMX components. Add structs that define the layout of AMX XSAVE areas and use QEMU_BUILD_BUG_ON to validate the structs sizes. Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220217060434.52460-3-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- target/i386/cpu.c | 8 ++++++++ target/i386/cpu.h | 18 +++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 258f128def163..a5bdf742e9081 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -1402,6 +1402,14 @@ ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = { [XSTATE_PKRU_BIT] = { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, .size = sizeof(XSavePKRU) }, + [XSTATE_XTILE_CFG_BIT] = { + .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_AMX_TILE, + .size = sizeof(XSaveXTILECFG), + }, + [XSTATE_XTILE_DATA_BIT] = { + .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_AMX_TILE, + .size = sizeof(XSaveXTILEDATA) + }, }; static uint32_t xsave_area_size(uint64_t mask) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 7bd9d58505863..3ff1b49d29e79 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -539,6 +539,8 @@ typedef enum X86Seg { #define XSTATE_ZMM_Hi256_BIT 6 #define XSTATE_Hi16_ZMM_BIT 7 #define XSTATE_PKRU_BIT 9 +#define XSTATE_XTILE_CFG_BIT 17 +#define XSTATE_XTILE_DATA_BIT 18 #define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT) #define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT) @@ -847,6 +849,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_7_0_EDX_TSX_LDTRK (1U << 16) /* AVX512_FP16 instruction */ #define CPUID_7_0_EDX_AVX512_FP16 (1U << 23) +/* AMX tile (two-dimensional register) */ +#define CPUID_7_0_EDX_AMX_TILE (1U << 24) /* Speculation Control */ #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) /* Single Thread Indirect Branch Predictors */ @@ -1350,6 +1354,16 @@ typedef struct XSavePKRU { uint32_t padding; } XSavePKRU; +/* Ext. save area 17: AMX XTILECFG state */ +typedef struct XSaveXTILECFG { + uint8_t xtilecfg[64]; +} XSaveXTILECFG; + +/* Ext. save area 18: AMX XTILEDATA state */ +typedef struct XSaveXTILEDATA { + uint8_t xtiledata[8][1024]; +} XSaveXTILEDATA; + QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100); QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40); QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40); @@ -1357,6 +1371,8 @@ QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40); QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200); QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400); QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8); +QEMU_BUILD_BUG_ON(sizeof(XSaveXTILECFG) != 0x40); +QEMU_BUILD_BUG_ON(sizeof(XSaveXTILEDATA) != 0x2000); typedef struct ExtSaveArea { uint32_t feature, bits; @@ -1364,7 +1380,7 @@ typedef struct ExtSaveArea { uint32_t ecx; } ExtSaveArea; -#define XSAVE_STATE_AREA_COUNT (XSTATE_PKRU_BIT + 1) +#define XSAVE_STATE_AREA_COUNT (XSTATE_XTILE_DATA_BIT + 1) extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT]; From 7c3202091f84cf92261d0a5027a478bdf639c902 Mon Sep 17 00:00:00 2001 From: Yang Zhong Date: Wed, 16 Feb 2022 22:04:29 -0800 Subject: [PATCH 09/17] x86: Grant AMX permission for guest Kernel allocates 4K xstate buffer by default. For XSAVE features which require large state component (e.g. 
AMX), Linux kernel dynamically expands the xstate buffer only after the process has acquired the necessary permissions. Those are called dynamically- enabled XSAVE features (or dynamic xfeatures). There are separate permissions for native tasks and guests. Qemu should request the guest permissions for dynamic xfeatures which will be exposed to the guest. This only needs to be done once before the first vcpu is created. KVM implemented one new ARCH_GET_XCOMP_SUPP system attribute API to get host side supported_xcr0 and Qemu can decide if it can request dynamically enabled XSAVE features permission. https://lore.kernel.org/all/20220126152210.3044876-1-pbonzini@redhat.com/ Suggested-by: Paolo Bonzini Signed-off-by: Yang Zhong Signed-off-by: Jing Liu Message-Id: <20220217060434.52460-4-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- target/i386/cpu.c | 7 +++++ target/i386/cpu.h | 4 +++ target/i386/kvm/kvm-cpu.c | 12 ++++---- target/i386/kvm/kvm.c | 57 ++++++++++++++++++++++++++++++++++++++ target/i386/kvm/kvm_i386.h | 1 + 5 files changed, 75 insertions(+), 6 deletions(-) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index a5bdf742e9081..1d40ef010383f 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -6013,6 +6013,7 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu) CPUX86State *env = &cpu->env; int i; uint64_t mask; + static bool request_perm; if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { env->features[FEAT_XSAVE_COMP_LO] = 0; @@ -6028,6 +6029,12 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu) } } + /* Only request permission for first vcpu */ + if (kvm_enabled() && !request_perm) { + kvm_request_xsave_components(cpu, mask); + request_perm = true; + } + env->features[FEAT_XSAVE_COMP_LO] = mask; env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; } diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 3ff1b49d29e79..9630f4712aa86 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -551,6 +551,10 @@ typedef enum X86Seg { #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT) #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) +#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT) +#define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT) + +#define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK) #define ESA_FEATURE_ALIGN64_BIT 1 diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c index ce27d3b1dff2a..a35a1bf9fe47b 100644 --- a/target/i386/kvm/kvm-cpu.c +++ b/target/i386/kvm/kvm-cpu.c @@ -84,7 +84,7 @@ static void kvm_cpu_max_instance_init(X86CPU *cpu) static void kvm_cpu_xsave_init(void) { static bool first = true; - KVMState *s = kvm_state; + uint32_t eax, ebx, ecx, edx; int i; if (!first) { @@ -100,11 +100,11 @@ static void kvm_cpu_xsave_init(void) ExtSaveArea *esa = &x86_ext_save_areas[i]; if (esa->size) { - int sz = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EAX); - if (sz != 0) { - assert(esa->size == sz); - esa->offset = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EBX); - esa->ecx = kvm_arch_get_supported_cpuid(s, 0xd, i, R_ECX); + host_cpuid(0xd, i, &eax, &ebx, &ecx, &edx); + if (eax != 0) { + assert(esa->size == eax); + esa->offset = ebx; + esa->ecx = ecx; } } } diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 2c8feb4a6f7bb..742f0eac4a6cd 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -17,6 +17,7 @@ #include "qapi/error.h" #include #include +#include #include #include "standard-headers/asm-x86/kvm_para.h" @@ -348,6 
+349,7 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, struct kvm_cpuid2 *cpuid; uint32_t ret = 0; uint32_t cpuid_1_edx; + uint64_t bitmask; cpuid = get_supported_cpuid(s); @@ -405,6 +407,25 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, if (!has_msr_arch_capabs) { ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES; } + } else if (function == 0xd && index == 0 && + (reg == R_EAX || reg == R_EDX)) { + struct kvm_device_attr attr = { + .group = 0, + .attr = KVM_X86_XCOMP_GUEST_SUPP, + .addr = (unsigned long) &bitmask + }; + + bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES); + if (!sys_attr) { + warn_report("cannot get sys attribute capabilities %d", sys_attr); + } + + int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr); + if (rc == -1 && (errno == ENXIO || errno == EINVAL)) { + warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) " + "error: %d", rc); + } + ret = (reg == R_EAX) ? bitmask : bitmask >> 32; } else if (function == 0x80000001 && reg == R_ECX) { /* * It's safe to enable TOPOEXT even if it's not returned by @@ -5148,3 +5169,39 @@ bool kvm_arch_cpu_check_are_resettable(void) { return !sev_es_enabled(); } + +#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025 + +void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask) +{ + KVMState *s = kvm_state; + uint64_t supported; + + mask &= XSTATE_DYNAMIC_MASK; + if (!mask) { + return; + } + /* + * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0]. + * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned + * about them already because they are not supported features. + */ + supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX); + supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32; + mask &= supported; + + while (mask) { + int bit = ctz64(mask); + int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit); + if (rc) { + /* + * Older kernel version (<5.17) do not support + * ARCH_REQ_XCOMP_GUEST_PERM, but also do not return + * any dynamic feature from kvm_arch_get_supported_cpuid. + */ + warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure " + "for feature bit %d", bit); + } + mask &= ~BIT_ULL(bit); + } +} diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h index a978509d507fa..4124912c202ea 100644 --- a/target/i386/kvm/kvm_i386.h +++ b/target/i386/kvm/kvm_i386.h @@ -52,5 +52,6 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp); uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address); bool kvm_enable_sgx_provisioning(KVMState *s); +void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask); #endif From a8cc1c1a7ededd2a3dff0472936a3eefbef20558 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 16 Feb 2022 22:04:30 -0800 Subject: [PATCH 10/17] x86: Add XFD faulting bit for state components Intel introduces XFD faulting mechanism for extended XSAVE features to dynamically enable the features in runtime. If CPUID (EAX=0Dh, ECX=n, n>1).ECX[2] is set as 1, it indicates support for XFD faulting of this state component. 
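As a rough illustration of the enumeration described above (and of the align64 bit from the earlier patch), the two ECX flags can be read directly with GCC's <cpuid.h> on a host that exposes the AMX subleaves. This is a standalone sketch, not part of the series; subleaf 18 (XTILEDATA) is used only as an example:

    #include <stdio.h>
    #include <cpuid.h>

    #define XSTATE_XTILE_DATA_BIT      18          /* AMX TILEDATA state component */
    #define ESA_FEATURE_ALIGN64_MASK   (1u << 1)   /* 64-byte alignment in compacted format */
    #define ESA_FEATURE_XFD_MASK       (1u << 2)   /* component supports XFD faulting */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* CPUID(EAX=0xD, ECX=18) describes the XTILEDATA component. */
        if (!__get_cpuid_count(0xd, XSTATE_XTILE_DATA_BIT, &eax, &ebx, &ecx, &edx)) {
            puts("CPUID leaf 0xD is not available on this CPU");
            return 1;
        }
        printf("XTILEDATA: size=%u offset=%u align64=%d xfd=%d\n",
               eax, ebx,
               !!(ecx & ESA_FEATURE_ALIGN64_MASK),
               !!(ecx & ESA_FEATURE_XFD_MASK));
        return 0;
    }

On a non-AMX host both flags simply read back as zero, which matches the "all zero" state the earlier patch describes for the existing components.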
Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220217060434.52460-5-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- target/i386/cpu.c | 3 ++- target/i386/cpu.h | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 1d40ef010383f..82862fb8d862a 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -5504,7 +5504,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, const ExtSaveArea *esa = &x86_ext_save_areas[count]; *eax = esa->size; *ebx = esa->offset; - *ecx = esa->ecx & ESA_FEATURE_ALIGN64_MASK; + *ecx = esa->ecx & + (ESA_FEATURE_ALIGN64_MASK | ESA_FEATURE_XFD_MASK); } } break; diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 9630f4712aa86..925d0129e29f0 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -557,8 +557,10 @@ typedef enum X86Seg { #define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK) #define ESA_FEATURE_ALIGN64_BIT 1 +#define ESA_FEATURE_XFD_BIT 2 #define ESA_FEATURE_ALIGN64_MASK (1U << ESA_FEATURE_ALIGN64_BIT) +#define ESA_FEATURE_XFD_MASK (1U << ESA_FEATURE_XFD_BIT) /* CPUID feature words */ From 168ed1d0f2e87727f2c5d5352c9b5da588d83c9f Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 16 Feb 2022 22:04:31 -0800 Subject: [PATCH 11/17] x86: Add AMX CPUIDs enumeration Add AMX primary feature bits XFD and AMX_TILE to enumerate the CPU's AMX capability. Meanwhile, add AMX TILE and TMUL CPUID leaf and subleaves which exist when AMX TILE is present to provide the maximum capability of TILE and TMUL. Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220217060434.52460-6-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- target/i386/cpu.c | 55 ++++++++++++++++++++++++++++++++++++++++--- target/i386/kvm/kvm.c | 4 +++- 2 files changed, 55 insertions(+), 4 deletions(-) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 82862fb8d862a..a88d6554c88ac 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -575,6 +575,18 @@ static CPUCacheInfo legacy_l3_cache = { #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ +/* CPUID Leaf 0x1D constants: */ +#define INTEL_AMX_TILE_MAX_SUBLEAF 0x1 +#define INTEL_AMX_TOTAL_TILE_BYTES 0x2000 +#define INTEL_AMX_BYTES_PER_TILE 0x400 +#define INTEL_AMX_BYTES_PER_ROW 0x40 +#define INTEL_AMX_TILE_MAX_NAMES 0x8 +#define INTEL_AMX_TILE_MAX_ROWS 0x10 + +/* CPUID Leaf 0x1E constants: */ +#define INTEL_AMX_TMUL_MAX_K 0x10 +#define INTEL_AMX_TMUL_MAX_N 0x40 + void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, uint32_t vendor2, uint32_t vendor3) { @@ -844,8 +856,8 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "avx512-vp2intersect", NULL, "md-clear", NULL, NULL, NULL, "serialize", NULL, "tsx-ldtrk", NULL, NULL /* pconfig */, NULL, - NULL, NULL, NULL, "avx512-fp16", - NULL, NULL, "spec-ctrl", "stibp", + NULL, NULL, "amx-bf16", "avx512-fp16", + "amx-tile", "amx-int8", "spec-ctrl", "stibp", NULL, "arch-capabilities", "core-capability", "ssbd", }, .cpuid = { @@ -910,7 +922,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .type = CPUID_FEATURE_WORD, .feat_names = { "xsaveopt", "xsavec", "xgetbv1", "xsaves", - NULL, NULL, NULL, NULL, + "xfd", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -5594,6 +5606,43 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } break; } + case 0x1D: { + /* AMX TILE */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; 
+ if (!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE)) { + break; + } + + if (count == 0) { + /* Highest numbered palette subleaf */ + *eax = INTEL_AMX_TILE_MAX_SUBLEAF; + } else if (count == 1) { + *eax = INTEL_AMX_TOTAL_TILE_BYTES | + (INTEL_AMX_BYTES_PER_TILE << 16); + *ebx = INTEL_AMX_BYTES_PER_ROW | (INTEL_AMX_TILE_MAX_NAMES << 16); + *ecx = INTEL_AMX_TILE_MAX_ROWS; + } + break; + } + case 0x1E: { + /* AMX TMUL */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + if (!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE)) { + break; + } + + if (count == 0) { + /* Highest numbered palette subleaf */ + *ebx = INTEL_AMX_TMUL_MAX_K | (INTEL_AMX_TMUL_MAX_N << 8); + } + break; + } case 0x40000000: /* * CPUID code in kvm_arch_init_vcpu() ignores stuff diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 742f0eac4a6cd..b786d6da96489 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -1780,7 +1780,9 @@ int kvm_arch_init_vcpu(CPUState *cs) c = &cpuid_data.entries[cpuid_i++]; } break; - case 0x14: { + case 0x14: + case 0x1d: + case 0x1e: { uint32_t times; c->function = i; From 4f9c00188d2e41e776dd3289554f0b77b7aa773d Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 16 Feb 2022 22:04:32 -0800 Subject: [PATCH 12/17] x86: add support for KVM_CAP_XSAVE2 and AMX state migration When dynamic xfeatures (e.g. AMX) are used by the guest, the xsave area would be larger than 4KB. KVM_GET_XSAVE2 and KVM_SET_XSAVE under KVM_CAP_XSAVE2 works with a xsave buffer larger than 4KB. Always use the new ioctls under KVM_CAP_XSAVE2 when KVM supports it. Signed-off-by: Jing Liu Signed-off-by: Zeng Guang Signed-off-by: Wei Wang Signed-off-by: Yang Zhong Message-Id: <20220217060434.52460-7-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- target/i386/cpu.h | 4 ++++ target/i386/kvm/kvm.c | 42 ++++++++++++++++++++++++-------------- target/i386/xsave_helper.c | 28 +++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 15 deletions(-) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 925d0129e29f0..8c850e74b827d 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -1527,6 +1527,10 @@ typedef struct CPUX86State { uint64_t opmask_regs[NB_OPMASK_REGS]; YMMReg zmmh_regs[CPU_NB_REGS]; ZMMReg hi16_zmm_regs[CPU_NB_REGS]; +#ifdef TARGET_X86_64 + uint8_t xtilecfg[64]; + uint8_t xtiledata[8192]; +#endif /* sysenter registers */ uint32_t sysenter_cs; diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index b786d6da96489..e64c06d3587ae 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -123,6 +123,7 @@ static uint32_t num_architectural_pmu_gp_counters; static uint32_t num_architectural_pmu_fixed_counters; static int has_xsave; +static int has_xsave2; static int has_xcrs; static int has_pit_state2; static int has_sregs2; @@ -1586,6 +1587,26 @@ static Error *invtsc_mig_blocker; #define KVM_MAX_CPUID_ENTRIES 100 +static void kvm_init_xsave(CPUX86State *env) +{ + if (has_xsave2) { + env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096); + } else if (has_xsave) { + env->xsave_buf_len = sizeof(struct kvm_xsave); + } else { + return; + } + + env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len); + memset(env->xsave_buf, 0, env->xsave_buf_len); + /* + * The allocated storage must be large enough for all of the + * possible XSAVE state components. 
+ */ + assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <= + env->xsave_buf_len); +} + int kvm_arch_init_vcpu(CPUState *cs) { struct { @@ -1615,6 +1636,8 @@ int kvm_arch_init_vcpu(CPUState *cs) cpuid_i = 0; + has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2); + r = kvm_arch_set_tsc_khz(cs); if (r < 0) { return r; @@ -2004,19 +2027,7 @@ int kvm_arch_init_vcpu(CPUState *cs) if (r) { goto fail; } - - if (has_xsave) { - env->xsave_buf_len = sizeof(struct kvm_xsave); - env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len); - memset(env->xsave_buf, 0, env->xsave_buf_len); - - /* - * The allocated storage must be large enough for all of the - * possible XSAVE state components. - */ - assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) - <= env->xsave_buf_len); - } + kvm_init_xsave(env); max_nested_state_len = kvm_max_nested_state_length(); if (max_nested_state_len > 0) { @@ -3320,13 +3331,14 @@ static int kvm_get_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; void *xsave = env->xsave_buf; - int ret; + int type, ret; if (!has_xsave) { return kvm_get_fpu(cpu); } - ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave); + type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE; + ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave); if (ret < 0) { return ret; } diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c index ac61a96344062..996e9f3bfef53 100644 --- a/target/i386/xsave_helper.c +++ b/target/i386/xsave_helper.c @@ -126,6 +126,20 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen) memcpy(pkru, &env->pkru, sizeof(env->pkru)); } + + e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT]; + if (e->size && e->offset) { + XSaveXTILECFG *tilecfg = buf + e->offset; + + memcpy(tilecfg, &env->xtilecfg, sizeof(env->xtilecfg)); + } + + e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT]; + if (e->size && e->offset && buflen >= e->size + e->offset) { + XSaveXTILEDATA *tiledata = buf + e->offset; + + memcpy(tiledata, &env->xtiledata, sizeof(env->xtiledata)); + } #endif } @@ -247,5 +261,19 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen) pkru = buf + e->offset; memcpy(&env->pkru, pkru, sizeof(env->pkru)); } + + e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT]; + if (e->size && e->offset) { + const XSaveXTILECFG *tilecfg = buf + e->offset; + + memcpy(&env->xtilecfg, tilecfg, sizeof(env->xtilecfg)); + } + + e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT]; + if (e->size && e->offset && buflen >= e->size + e->offset) { + const XSaveXTILEDATA *tiledata = buf + e->offset; + + memcpy(&env->xtiledata, tiledata, sizeof(env->xtiledata)); + } #endif } From b7390f6e7a0be099e084464019e45749a04b4a99 Mon Sep 17 00:00:00 2001 From: Zeng Guang Date: Wed, 16 Feb 2022 22:04:33 -0800 Subject: [PATCH 13/17] x86: Support XFD and AMX xsave data migration XFD(eXtended Feature Disable) allows to enable a feature on xsave state while preventing specific user threads from using the feature. Support save and restore XFD MSRs if CPUID.D.1.EAX[4] enumerate to be valid. Likewise migrate the MSRs and related xsave state necessarily. 
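To make the size checks in the previous patch more concrete, here is a small standalone sketch (not QEMU code) of why the XTILEDATA copy in xsave_helper.c is guarded by buflen >= e->size + e->offset: the AMX tile data simply does not fit in the legacy 4 KiB struct kvm_xsave buffer, only in the larger KVM_GET_XSAVE2 buffer. The offset/size numbers are example values for an AMX-capable part, not taken from the patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ext_save_area {
        uint32_t offset, size;
    };

    /* Mirrors the guard used before copying a component into the XSAVE buffer. */
    static bool component_fits(const struct ext_save_area *e, uint32_t buflen)
    {
        return e->size && e->offset && buflen >= e->size + e->offset;
    }

    int main(void)
    {
        /* Example CPUID(0xD, 18) values; real hardware may differ. */
        struct ext_save_area xtiledata = { .offset = 2816, .size = 8192 };

        printf("fits in 4 KiB kvm_xsave buffer: %d\n", component_fits(&xtiledata, 4096));
        printf("fits in a 16 KiB XSAVE2 buffer: %d\n", component_fits(&xtiledata, 16384));
        return 0;
    }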
Signed-off-by: Zeng Guang Signed-off-by: Wei Wang Signed-off-by: Yang Zhong Message-Id: <20220217060434.52460-8-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- target/i386/cpu.h | 9 +++++++++ target/i386/kvm/kvm.c | 18 +++++++++++++++++ target/i386/machine.c | 46 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 73 insertions(+) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 8c850e74b827d..efea2c78ecfc5 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -507,6 +507,9 @@ typedef enum X86Seg { #define MSR_VM_HSAVE_PA 0xc0010117 +#define MSR_IA32_XFD 0x000001c4 +#define MSR_IA32_XFD_ERR 0x000001c5 + #define MSR_IA32_BNDCFGS 0x00000d90 #define MSR_IA32_XSS 0x00000da0 #define MSR_IA32_UMWAIT_CONTROL 0xe1 @@ -872,6 +875,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_7_1_EAX_AVX_VNNI (1U << 4) /* AVX512 BFloat16 Instruction */ #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) +/* XFD Extend Feature Disabled */ +#define CPUID_D_1_EAX_XFD (1U << 4) /* Packets which contain IP payload have LIP values */ #define CPUID_14_0_ECX_LIP (1U << 31) @@ -1616,6 +1621,10 @@ typedef struct CPUX86State { uint64_t msr_rtit_cr3_match; uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS]; + /* Per-VCPU XFD MSRs */ + uint64_t msr_xfd; + uint64_t msr_xfd_err; + /* exception/interrupt handling */ int error_code; int exception_is_int; diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index e64c06d3587ae..fe8e924846424 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -3276,6 +3276,13 @@ static int kvm_put_msrs(X86CPU *cpu, int level) env->msr_ia32_sgxlepubkeyhash[3]); } + if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { + kvm_msr_entry_add(cpu, MSR_IA32_XFD, + env->msr_xfd); + kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, + env->msr_xfd_err); + } + /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see * kvm_put_msr_feature_control. 
*/ } @@ -3668,6 +3675,11 @@ static int kvm_get_msrs(X86CPU *cpu) kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0); } + if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { + kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0); + kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0); + } + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf); if (ret < 0) { return ret; @@ -3964,6 +3976,12 @@ static int kvm_get_msrs(X86CPU *cpu) env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] = msrs[i].data; break; + case MSR_IA32_XFD: + env->msr_xfd = msrs[i].data; + break; + case MSR_IA32_XFD_ERR: + env->msr_xfd_err = msrs[i].data; + break; } } diff --git a/target/i386/machine.c b/target/i386/machine.c index 6202f4779379a..7c54bada819b8 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -1483,6 +1483,48 @@ static const VMStateDescription vmstate_pdptrs = { } }; +static bool xfd_msrs_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return !!(env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD); +} + +static const VMStateDescription vmstate_msr_xfd = { + .name = "cpu/msr_xfd", + .version_id = 1, + .minimum_version_id = 1, + .needed = xfd_msrs_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(env.msr_xfd, X86CPU), + VMSTATE_UINT64(env.msr_xfd_err, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + +#ifdef TARGET_X86_64 +static bool amx_xtile_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE); +} + +static const VMStateDescription vmstate_amx_xtile = { + .name = "cpu/intel_amx_xtile", + .version_id = 1, + .minimum_version_id = 1, + .needed = amx_xtile_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64), + VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192), + VMSTATE_END_OF_LIST() + } +}; +#endif + const VMStateDescription vmstate_x86_cpu = { .name = "cpu", .version_id = 12, @@ -1622,6 +1664,10 @@ const VMStateDescription vmstate_x86_cpu = { &vmstate_msr_tsx_ctrl, &vmstate_msr_intel_sgx, &vmstate_pdptrs, + &vmstate_msr_xfd, +#ifdef TARGET_X86_64 + &vmstate_amx_xtile, +#endif NULL } }; From 3600062b955399088cb7fd2bcfa0558466acf32d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Tue, 22 Feb 2022 23:40:01 +0400 Subject: [PATCH 14/17] meson: fix generic location of vss headers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a left-over, despite requesting the change before the merge. 
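Stepping back to the vmstate subsections added in the previous patch for XFD and AMX state: the .needed callback is what keeps the migration stream compatible with guests that do not use the new state. A conceptual, standalone sketch of that pattern (plain C with illustrative names, not QEMU's actual VMState machinery):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct SubSection {
        const char *name;
        bool (*needed)(void *opaque);      /* decides if the data goes on the wire */
    } SubSection;

    static bool xfd_needed(void *opaque)
    {
        return *(bool *)opaque;            /* e.g. CPUID.D.1.EAX[4] set for this vCPU */
    }

    static void save_subsections(const SubSection *subs, int n, void *opaque)
    {
        for (int i = 0; i < n; i++) {
            if (subs[i].needed && subs[i].needed(opaque)) {
                printf("sending subsection %s\n", subs[i].name);
            } else {
                printf("skipping subsection %s\n", subs[i].name);
            }
        }
    }

    int main(void)
    {
        bool guest_has_xfd = false;        /* guest without XFD: nothing extra is sent */
        SubSection subs[] = { { "cpu/msr_xfd", xfd_needed } };

        save_subsections(subs, 1, &guest_has_xfd);
        return 0;
    }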
Fixes: commit 8821a389 ("configure, meson: replace VSS SDK checks and options with --enable-vss-sdk") Signed-off-by: Marc-André Lureau Message-Id: <20220222194008.610377-2-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini --- meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meson.build b/meson.build index 8df40bfac4d70..b871098dbba02 100644 --- a/meson.build +++ b/meson.build @@ -1936,7 +1936,7 @@ have_vss = false if targetos == 'windows' and link_language == 'cpp' have_vss = cxx.compiles(''' #define __MIDL_user_allocate_free_DEFINED__ - #include + #include int main(void) { return VSS_CTX_BACKUP; }''') endif From bd1c4f4a75359193161062f18ceb996c7e539ce3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Tue, 22 Feb 2022 23:40:02 +0400 Subject: [PATCH 15/17] qga/vss-win32: check old VSS SDK headers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The VssCoordinator & VssAdmin interfaces have been moved to vsadmin.h in the Windows SDK. Signed-off-by: Marc-André Lureau Message-Id: <20220222194008.610377-3-marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini --- meson.build | 3 +++ qga/vss-win32/install.cpp | 4 ++++ qga/vss-win32/provider.cpp | 4 ++++ qga/vss-win32/vss-common.h | 3 ++- 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/meson.build b/meson.build index b871098dbba02..101a3f2d31ee0 100644 --- a/meson.build +++ b/meson.build @@ -1933,12 +1933,15 @@ config_host_data.set('CONFIG_AF_VSOCK', cc.compiles(gnu_source_prefix + ''' }''')) have_vss = false +have_vss_sdk = false # old xp/2003 SDK if targetos == 'windows' and link_language == 'cpp' have_vss = cxx.compiles(''' #define __MIDL_user_allocate_free_DEFINED__ #include int main(void) { return VSS_CTX_BACKUP; }''') + have_vss_sdk = cxx.has_header('vscoordint.h') endif +config_host_data.set('HAVE_VSS_SDK', have_vss_sdk) have_ntddscsi = false if targetos == 'windows' diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp index efc5bb9909c09..8076efe3cbb5c 100644 --- a/qga/vss-win32/install.cpp +++ b/qga/vss-win32/install.cpp @@ -13,7 +13,11 @@ #include "qemu/osdep.h" #include "vss-common.h" +#ifdef HAVE_VSS_SDK #include +#else +#include +#endif #include "install.h" #include #include diff --git a/qga/vss-win32/provider.cpp b/qga/vss-win32/provider.cpp index fd187fb66f673..1b885e24ee179 100644 --- a/qga/vss-win32/provider.cpp +++ b/qga/vss-win32/provider.cpp @@ -12,7 +12,11 @@ #include "qemu/osdep.h" #include "vss-common.h" +#ifdef HAVE_VSS_SDK #include +#else +#include +#endif #include #define VSS_TIMEOUT_MSEC (60*1000) diff --git a/qga/vss-win32/vss-common.h b/qga/vss-win32/vss-common.h index 54f8de8c8851d..0e67e7822ce67 100644 --- a/qga/vss-win32/vss-common.h +++ b/qga/vss-win32/vss-common.h @@ -64,12 +64,13 @@ const CLSID CLSID_QGAVSSProvider = { 0x6e6a3492, 0x8d4d, 0x440c, const TCHAR g_szClsid[] = TEXT("{6E6A3492-8D4D-440C-9619-5E5D0CC31CA8}"); const TCHAR g_szProgid[] = TEXT("QGAVSSProvider"); +#ifdef HAVE_VSS_SDK /* Enums undefined in VSS SDK 7.2 but defined in newer Windows SDK */ enum __VSS_VOLUME_SNAPSHOT_ATTRIBUTES { VSS_VOLSNAP_ATTR_NO_AUTORECOVERY = 0x00000002, VSS_VOLSNAP_ATTR_TXF_RECOVERY = 0x02000000 }; - +#endif /* COM pointer utility; call ->Release() when it goes out of scope */ template From 4d43a424df7af9245d84c52b5c02923dc2c0e3d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= Date: Tue, 22 Feb 2022 23:40:03 +0400 Subject: [PATCH 16/17] qga/vss: update informative 
 message about MinGW
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The headers are now all available in the MinGW master branch (commit
13390dbbf885f and earlier), aiming for the 10.0 release.

Signed-off-by: Marc-André Lureau
Message-Id: <20220222194008.610377-4-marcandre.lureau@redhat.com>
Signed-off-by: Paolo Bonzini
---
 qga/meson.build | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/qga/meson.build b/qga/meson.build
index 54f2da5b07635..62472747f1bbd 100644
--- a/qga/meson.build
+++ b/qga/meson.build
@@ -15,7 +15,7 @@ have_qga_vss = get_option('qga_vss') \
     If your Visual Studio installation doesn't have the VSS headers,
     Please download and install Microsoft VSS SDK:
     http://www.microsoft.com/en-us/download/details.aspx?id=23490
-    On POSIX-systems, MinGW doesn't yet provide working headers.
+    On POSIX-systems, MinGW should provide headers in >=10.0 releases.
    you can extract the SDK headers by:
    $ scripts/extract-vsssdk-headers setup.exe
    The headers are extracted in the directory 'inc/win2003'.

From 339f97120fb39f41a77884e0c73de06accc2aaf4 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Mon, 28 Feb 2022 18:21:25 +0100
Subject: [PATCH 17/17] update meson-buildoptions.sh

Signed-off-by: Paolo Bonzini
---
 scripts/meson-buildoptions.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh
index 9ee684ef034b3..1e26f4571ef4f 100644
--- a/scripts/meson-buildoptions.sh
+++ b/scripts/meson-buildoptions.sh
@@ -20,7 +20,6 @@ meson_options_help() {
   printf "%s\n" '  --enable-malloc=CHOICE   choose memory allocator to use [system] (choices:'
   printf "%s\n" '                           jemalloc/system/tcmalloc)'
   printf "%s\n" '  --enable-profiler        profiler support'
-  printf "%s\n" '  --enable-qga-vss         build QGA VSS support'
   printf "%s\n" '  --enable-qom-cast-debug  cast debugging support'
   printf "%s\n" '  --enable-rng-none        dummy RNG, avoid using /dev/(u)random and'
   printf "%s\n" '                           getrandom()'
@@ -97,6 +96,7 @@ meson_options_help() {
   printf "%s\n" '  parallels       parallels image format support'
   printf "%s\n" '  qcow1           qcow1 image format support'
   printf "%s\n" '  qed             qed image format support'
+  printf "%s\n" '  qga-vss         build QGA VSS support (broken with MinGW)'
  printf "%s\n" '  rbd             Ceph block device driver'
  printf "%s\n" '  replication     replication support'
  printf "%s\n" '  sdl             SDL user interface'
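For context on how the build-system probe in the VSS patches reaches the C++ sources: the have_vss_sdk result is exported as HAVE_VSS_SDK via the generated configuration header, and the guest agent then selects the matching header at compile time. The fragment below is a hedged, Windows-only sketch of that pattern; the header names are the ones mentioned in the vss-win32 patch, and "config-host.h" stands in for QEMU's generated config header:

    /* Windows-only fragment; mirrors the include selection used by qga/vss-win32. */
    #include "config-host.h"     /* defines HAVE_VSS_SDK when vscoordint.h was found */

    #ifdef HAVE_VSS_SDK
    #include <vscoordint.h>      /* legacy XP/2003 VSS SDK header */
    #else
    #include <vsadmin.h>         /* Windows SDK / MinGW >= 10.0 header */
    #endif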