Skip to content

Commit

Permalink
Merge tag 'v6.0.6' into 6.0
Browse files Browse the repository at this point in the history
This is the 6.0.6 stable release
  • Loading branch information
xanmod committed Oct 29, 2022
2 parents cf5b06a + e90fbe6 commit c892de7
Show file tree
Hide file tree
Showing 128 changed files with 887 additions and 440 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 0
SUBLEVEL = 5
SUBLEVEL = 6
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

Expand Down
5 changes: 4 additions & 1 deletion arch/arm64/kvm/vgic/vgic-its.c
Expand Up @@ -2149,7 +2149,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,

memset(entry, 0, esz);

while (len > 0) {
while (true) {
int next_offset;
size_t byte_offset;

Expand All @@ -2162,6 +2162,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
return next_offset;

byte_offset = next_offset * esz;
if (byte_offset >= len)
break;

id += next_offset;
gpa += byte_offset;
len -= byte_offset;
Expand Down
1 change: 0 additions & 1 deletion arch/x86/Kconfig
Expand Up @@ -1962,7 +1962,6 @@ config EFI
config EFI_STUB
bool "EFI stub support"
depends on EFI
depends on $(cc-option,-mabi=ms) || X86_32
select RELOCATABLE
help
This kernel feature allows a bzImage to be loaded directly
Expand Down
4 changes: 3 additions & 1 deletion arch/x86/include/asm/iommu.h
Expand Up @@ -25,8 +25,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
{
u64 start = rmrr->base_address;
u64 end = rmrr->end_address + 1;
int entry_type;

if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
entry_type = e820__get_entry_type(start, end);
if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS)
return 0;

pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
Expand Down
16 changes: 13 additions & 3 deletions arch/x86/kernel/cpu/microcode/amd.c
Expand Up @@ -440,7 +440,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
return ret;

native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
if (rev >= mc->hdr.patch_id)

/*
* Allow application of the same revision to pick up SMT-specific
* changes even if the revision of the other SMT thread is already
* up-to-date.
*/
if (rev > mc->hdr.patch_id)
return ret;

if (!__apply_microcode_amd(mc)) {
Expand Down Expand Up @@ -528,8 +534,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)

native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

/* Check whether we have saved a new patch already: */
if (*new_rev && rev < mc->hdr.patch_id) {
/*
* Check whether a new patch has been saved already. Also, allow application of
* the same revision in order to pick up SMT-thread-specific configuration even
* if the sibling SMT thread already has an up-to-date revision.
*/
if (*new_rev && rev <= mc->hdr.patch_id) {
if (!__apply_microcode_amd(mc)) {
*new_rev = mc->hdr.patch_id;
return;
Expand Down
8 changes: 2 additions & 6 deletions arch/x86/kernel/cpu/resctrl/core.c
Expand Up @@ -66,9 +66,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
.rid = RDT_RESOURCE_L3,
.name = "L3",
.cache_level = 3,
.cache = {
.min_cbm_bits = 1,
},
.domains = domain_init(RDT_RESOURCE_L3),
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
Expand All @@ -83,9 +80,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
.rid = RDT_RESOURCE_L2,
.name = "L2",
.cache_level = 2,
.cache = {
.min_cbm_bits = 1,
},
.domains = domain_init(RDT_RESOURCE_L2),
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
Expand Down Expand Up @@ -877,6 +871,7 @@ static __init void rdt_init_res_defs_intel(void)
r->cache.arch_has_sparse_bitmaps = false;
r->cache.arch_has_empty_bitmaps = false;
r->cache.arch_has_per_cpu_cfg = false;
r->cache.min_cbm_bits = 1;
} else if (r->rid == RDT_RESOURCE_MBA) {
hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
hw_res->msr_update = mba_wrmsr_intel;
Expand All @@ -897,6 +892,7 @@ static __init void rdt_init_res_defs_amd(void)
r->cache.arch_has_sparse_bitmaps = true;
r->cache.arch_has_empty_bitmaps = true;
r->cache.arch_has_per_cpu_cfg = true;
r->cache.min_cbm_bits = 0;
} else if (r->rid == RDT_RESOURCE_MBA) {
hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
hw_res->msr_update = mba_wrmsr_amd;
Expand Down
16 changes: 11 additions & 5 deletions arch/x86/kernel/cpu/topology.c
Expand Up @@ -96,6 +96,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
unsigned int core_select_mask, core_level_siblings;
unsigned int die_select_mask, die_level_siblings;
unsigned int pkg_mask_width;
bool die_level_present = false;
int leaf;

Expand All @@ -111,10 +112,10 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);

sub_index = 1;
do {
while (true) {
cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx);

/*
Expand All @@ -132,10 +133,15 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
}

if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE)
pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
else
break;

sub_index++;
} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
}

core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width;
die_select_mask = (~(-1 << die_plus_mask_width)) >>
core_plus_mask_width;

Expand All @@ -148,7 +154,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
}

c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
die_plus_mask_width);
pkg_mask_width);
/*
* Reinit the apicid, now that we have extended initial_apicid.
*/
Expand Down
87 changes: 73 additions & 14 deletions arch/x86/kvm/x86.c
Expand Up @@ -6406,35 +6406,31 @@ static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
return 0;
}

static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
struct kvm_msr_filter *filter)
{
struct kvm_msr_filter __user *user_msr_filter = argp;
struct kvm_x86_msr_filter *new_filter, *old_filter;
struct kvm_msr_filter filter;
bool default_allow;
bool empty = true;
int r = 0;
u32 i;

if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
return -EFAULT;

if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
return -EINVAL;

for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
empty &= !filter.ranges[i].nmsrs;
for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
empty &= !filter->ranges[i].nmsrs;

default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY);
default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
if (empty && !default_allow)
return -EINVAL;

new_filter = kvm_alloc_msr_filter(default_allow);
if (!new_filter)
return -ENOMEM;

for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
if (r) {
kvm_free_msr_filter(new_filter);
return r;
Expand All @@ -6457,6 +6453,62 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
return 0;
}

#ifdef CONFIG_KVM_COMPAT
/* for KVM_X86_SET_MSR_FILTER */
/*
 * 32-bit userspace layout of struct kvm_msr_filter_range.  The native
 * structure ends in a '__u8 *bitmap' pointer (see the cast below), which is
 * only 4 bytes wide for compat tasks, so the struct size — and therefore the
 * _IOW() ioctl number — differs from the native one.
 */
struct kvm_msr_filter_range_compat {
__u32 flags;
__u32 nmsrs;
__u32 base;
__u32 bitmap;	/* 32-bit userspace pointer to the MSR bitmap */
};

/* Compat counterpart of struct kvm_msr_filter (same flags, compat ranges). */
struct kvm_msr_filter_compat {
__u32 flags;
struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];
};

/* Ioctl number 0xc6 matches KVM_X86_SET_MSR_FILTER but with the compat size. */
#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)

/*
 * Compat (32-bit userspace) VM ioctl handler.  Only
 * KVM_X86_SET_MSR_FILTER_COMPAT is handled here: the compat structure is
 * copied in, widened field by field into a native struct kvm_msr_filter and
 * forwarded to kvm_vm_ioctl_set_msr_filter().  Every other ioctl returns
 * -ENOTTY so the caller can fall back to the native path.
 */
long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct kvm *kvm = filp->private_data;
long r = -ENOTTY;

switch (ioctl) {
case KVM_X86_SET_MSR_FILTER_COMPAT: {
struct kvm_msr_filter __user *user_msr_filter = argp;
struct kvm_msr_filter_compat filter_compat;
struct kvm_msr_filter filter;
int i;

if (copy_from_user(&filter_compat, user_msr_filter,
sizeof(filter_compat)))
return -EFAULT;

/* Widen each compat range into its native representation. */
filter.flags = filter_compat.flags;
for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
struct kvm_msr_filter_range_compat *cr;

cr = &filter_compat.ranges[i];
filter.ranges[i] = (struct kvm_msr_filter_range) {
.flags = cr->flags,
.nmsrs = cr->nmsrs,
.base = cr->base,
/* zero-extend the 32-bit user pointer via ulong */
.bitmap = (__u8 *)(ulong)cr->bitmap,
};
}

r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
break;
}
}

return r;
}
#endif

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_arch_suspend_notifier(struct kvm *kvm)
{
Expand Down Expand Up @@ -6879,9 +6931,16 @@ long kvm_arch_vm_ioctl(struct file *filp,
case KVM_SET_PMU_EVENT_FILTER:
r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
break;
case KVM_X86_SET_MSR_FILTER:
r = kvm_vm_ioctl_set_msr_filter(kvm, argp);
case KVM_X86_SET_MSR_FILTER: {
struct kvm_msr_filter __user *user_msr_filter = argp;
struct kvm_msr_filter filter;

if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
return -EFAULT;

r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
break;
}
default:
r = -ENOTTY;
}
Expand Down
7 changes: 5 additions & 2 deletions block/blk-mq.c
Expand Up @@ -3028,8 +3028,11 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
struct page *page;
unsigned long flags;

/* There is no need to clear a driver tags own mapping */
if (drv_tags == tags)
/*
* There is no need to clear mapping if driver tags is not initialized
* or the mapping belongs to the driver tags.
*/
if (!drv_tags || drv_tags == tags)
return;

list_for_each_entry(page, &tags->page_list, lru) {
Expand Down
33 changes: 20 additions & 13 deletions drivers/acpi/acpi_extlog.c
Expand Up @@ -12,6 +12,7 @@
#include <linux/ratelimit.h>
#include <linux/edac.h>
#include <linux/ras.h>
#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>

Expand Down Expand Up @@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
int cpu = mce->extcpu;
struct acpi_hest_generic_status *estatus, *tmp;
struct acpi_hest_generic_data *gdata;
const guid_t *fru_id = &guid_null;
char *fru_text = "";
const guid_t *fru_id;
char *fru_text;
guid_t *sec_type;
static u32 err_seq;

Expand All @@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,

/* log event via trace */
err_seq++;
gdata = (struct acpi_hest_generic_data *)(tmp + 1);
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
fru_id = (guid_t *)gdata->fru_id;
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
sec_type = (guid_t *)gdata->section_type;
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem = (void *)(gdata + 1);
if (gdata->error_data_length >= sizeof(*mem))
trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
(u8)gdata->error_severity);
apei_estatus_for_each_section(tmp, gdata) {
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
fru_id = (guid_t *)gdata->fru_id;
else
fru_id = &guid_null;
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
else
fru_text = "";
sec_type = (guid_t *)gdata->section_type;
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem = (void *)(gdata + 1);

if (gdata->error_data_length >= sizeof(*mem))
trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
(u8)gdata->error_severity);
}
}

out:
Expand Down

0 comments on commit c892de7

Please sign in to comment.