KVM: arm64: Use per guest ID register for ID_AA64PFR0_EL1.[CSV2|CSV3]
With per-guest ID registers, the ID_AA64PFR0_EL1.[CSV2|CSV3] settings from
userspace can be stored in the guest's corresponding ID register.

The setting of the CSV bits for protected VMs is removed, following the
discussion with Fuad linked below:
https://lore.kernel.org/all/CA+EHjTwXA9TprX4jeG+-D+c8v9XG+oFdU1o6TSkvVye145_OvA@mail.gmail.com

Besides the removal of the CSV bits setting for protected VMs, no other
functional change is intended.

Signed-off-by: Jing Zhang <jingzhangos@google.com>
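
For context (not part of the commit): a minimal userspace sketch of how a VMM
could override the CSV2 field that this patch now stores in the per-guest ID
register. It assumes an arm64 host and uses only the existing
KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls; the set_csv2() helper, its error
handling, and the hard-coded bit positions are illustrative.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* ID_AA64PFR0_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=4, op2=0 */
#define ID_AA64PFR0_EL1_ID	ARM64_SYS_REG(3, 0, 0, 4, 0)

/* Illustrative helper: read-modify-write CSV2 (bits [59:56]) for one vCPU */
static int set_csv2(int vcpu_fd, uint64_t csv2)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = ID_AA64PFR0_EL1_ID,
		.addr = (uint64_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	val &= ~(0xfULL << 56);		/* clear CSV2 */
	val |= csv2 << 56;		/* set the requested value */

	/*
	 * With this patch, the write lands in the per-guest ID register.
	 * The ioctl fails with errno EINVAL if the value promises more
	 * than the host supports, or EBUSY if the VM has already run and
	 * the requested value differs from the current one.
	 */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}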
jingzhangos authored and intel-lab-lkp committed May 3, 2023
1 parent 7a40543 commit 5c88787
Showing 3 changed files with 44 additions and 31 deletions.
2 changes: 0 additions & 2 deletions arch/arm64/include/asm/kvm_host.h
@@ -248,8 +248,6 @@ struct kvm_arch {

cpumask_var_t supported_cpus;

u8 pfr0_csv2;
u8 pfr0_csv3;
struct {
u8 imp:4;
u8 unimp:4;
17 changes: 0 additions & 17 deletions arch/arm64/kvm/arm.c
@@ -104,22 +104,6 @@ static int kvm_arm_default_max_vcpus(void)
return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

static void set_default_spectre(struct kvm *kvm)
{
/*
* The default is to expose CSV2 == 1 if the HW isn't affected.
* Although this is a per-CPU feature, we make it global because
* asymmetric systems are just a nuisance.
*
* Userspace can override this as long as it doesn't promise
* the impossible.
*/
if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
kvm->arch.pfr0_csv2 = 1;
if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
kvm->arch.pfr0_csv3 = 1;
}

/**
* kvm_arch_init_vm - initializes a VM data structure
* @kvm: pointer to the KVM struct
@@ -151,7 +135,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* The maximum number of VCPUs is limited by the host's GIC model */
kvm->max_vcpus = kvm_arm_default_max_vcpus();

set_default_spectre(kvm);
kvm_arm_init_hypercalls(kvm);
kvm_arm_init_id_regs(kvm);

56 changes: 44 additions & 12 deletions arch/arm64/kvm/id_regs.c
@@ -61,12 +61,6 @@ u64 kvm_arm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
if (!vcpu_has_sve(vcpu))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
(u64)vcpu->kvm->arch.pfr0_csv2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
(u64)vcpu->kvm->arch.pfr0_csv3);
if (kvm_vgic_global_state.type == VGIC_V3) {
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
@@ -153,7 +147,12 @@ static bool access_id_reg(struct kvm_vcpu *vcpu,
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
u64 *val)
{
struct kvm_arch *arch = &vcpu->kvm->arch;

mutex_lock(&arch->config_lock);
*val = read_id_reg(vcpu, rd);
mutex_unlock(&arch->config_lock);

return 0;
}

@@ -200,7 +199,10 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd,
u64 val)
{
struct kvm_arch *arch = &vcpu->kvm->arch;
u64 sval = val;
u8 csv2, csv3;
int ret = 0;

/*
* Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
@@ -216,17 +218,26 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
if (csv3 > 1 || (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
return -EINVAL;

mutex_lock(&arch->config_lock);
/* We can only differ with CSV[23], and anything else is an error */
val ^= read_id_reg(vcpu, rd);
val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
if (val)
return -EINVAL;

vcpu->kvm->arch.pfr0_csv2 = csv2;
vcpu->kvm->arch.pfr0_csv3 = csv3;
if (val) {
ret = -EINVAL;
goto out;
}

return 0;
/* Only allow userspace to change the idregs before VM running */
if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &vcpu->kvm->arch.flags)) {
if (sval != read_id_reg(vcpu, rd))
ret = -EBUSY;
} else {
IDREG(vcpu->kvm, reg_to_encoding(rd)) = sval;
}
out:
mutex_unlock(&arch->config_lock);
return ret;
}

static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
@@ -485,4 +496,25 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
val = read_sanitised_ftr_reg(id);
IDREG(kvm, id) = val;
}

/*
* The default is to expose CSV2 == 1 if the HW isn't affected.
* Although this is a per-CPU feature, we make it global because
* asymmetric systems are just a nuisance.
*
* Userspace can override this as long as it doesn't promise
* the impossible.
*/
val = IDREG(kvm, SYS_ID_AA64PFR0_EL1);

if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), 1);
}
if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), 1);
}

IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
}
