Merge tag 'v5.15.104' into HEAD
This is the 5.15.104 stable release

Signed-off-by: Markus Niebel <Markus.Niebel@ew.tq-group.com>
Markus Niebel authored and Markus Niebel committed Mar 30, 2023
2 parents b2e79e0 + 1154723 commit 1524008
Showing 120 changed files with 919 additions and 439 deletions.
2 changes: 1 addition & 1 deletion Documentation/filesystems/vfs.rst
@@ -1210,7 +1210,7 @@ defined:
        return
        -ECHILD and it will be called again in ref-walk mode.

-``_weak_revalidate``
+``d_weak_revalidate``
called when the VFS needs to revalidate a "jumped" dentry. This
is called when a path-walk ends at dentry that was not acquired
by doing a lookup in the parent directory. This includes "/",
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
-SUBLEVEL = 103
+SUBLEVEL = 104
EXTRAVERSION =
NAME = Trick or Treat

2 changes: 0 additions & 2 deletions arch/riscv/include/asm/mmu.h
@@ -19,8 +19,6 @@ typedef struct {
#ifdef CONFIG_SMP
/* A local icache flush is needed before user execution can resume. */
cpumask_t icache_stale_mask;
-        /* A local tlb flush is needed before user execution can resume. */
-        cpumask_t tlb_stale_mask;
#endif
} mm_context_t;

18 changes: 0 additions & 18 deletions arch/riscv/include/asm/tlbflush.h
@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
{
ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
}

-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-        __asm__ __volatile__ ("sfence.vma x0, %0"
-                        :
-                        : "r" (asid)
-                        : "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-                unsigned long asid)
-{
-        __asm__ __volatile__ ("sfence.vma %0, %1"
-                        :
-                        : "r" (addr), "r" (asid)
-                        : "memory");
-}

#else /* CONFIG_MMU */
#define local_flush_tlb_all() do { } while (0)
#define local_flush_tlb_page(addr) do { } while (0)
40 changes: 20 additions & 20 deletions arch/riscv/mm/context.c
@@ -196,16 +196,6 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)

if (need_flush_tlb)
local_flush_tlb_all();
-#ifdef CONFIG_SMP
-        else {
-                cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-                if (cpumask_test_cpu(cpu, mask)) {
-                        cpumask_clear_cpu(cpu, mask);
-                        local_flush_tlb_all_asid(cntx & asid_mask);
-                }
-        }
-#endif
}

static void set_mm_noasid(struct mm_struct *mm)
@@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm)
local_flush_tlb_all();
}

-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+                          struct mm_struct *next, unsigned int cpu)
{
-        if (static_branch_unlikely(&use_asid_allocator))
-                set_mm_asid(mm, cpu);
-        else
-                set_mm_noasid(mm);
+        /*
+         * The mm_cpumask indicates which harts' TLBs contain the virtual
+         * address mapping of the mm. Compared to noasid, using asid
+         * can't guarantee that stale TLB entries are invalidated because
+         * the asid mechanism wouldn't flush TLB for every switch_mm for
+         * performance. So when using asid, keep all CPUs footmarks in
+         * cpumask() until mm reset.
+         */
+        cpumask_set_cpu(cpu, mm_cpumask(next));
+        if (static_branch_unlikely(&use_asid_allocator)) {
+                set_mm_asid(next, cpu);
+        } else {
+                cpumask_clear_cpu(cpu, mm_cpumask(prev));
+                set_mm_noasid(next);
+        }
}

static int __init asids_init(void)
@@ -272,7 +274,8 @@ static int __init asids_init(void)
}
early_initcall(asids_init);
#else
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+                          struct mm_struct *next, unsigned int cpu)
{
/* Nothing to do here when there is no MMU */
}
@@ -325,10 +328,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
cpu = smp_processor_id();

-        cpumask_clear_cpu(cpu, mm_cpumask(prev));
-        cpumask_set_cpu(cpu, mm_cpumask(next));
-
-        set_mm(next, cpu);
+        set_mm(prev, next, cpu);

flush_icache_deferred(next, cpu);
}
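
Note: the hunk above makes set_mm() manage mm_cpumask() itself, keeping a hart's bit set for the previous mm whenever ASIDs are in use. A minimal userspace model of that policy — illustrative only, not kernel code; NR_CPUS, struct mm, and this set_mm() are toy stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Toy stand-in for mm_cpumask(): which harts may still hold TLB entries. */
struct mm {
        bool cpumask[NR_CPUS];
};

static void set_mm(struct mm *prev, struct mm *next, int cpu, bool use_asid)
{
        /* The incoming mm may now have translations cached on this hart. */
        next->cpumask[cpu] = true;

        if (use_asid) {
                /*
                 * ASID-tagged entries for prev survive the switch without a
                 * flush, so prev's bit must stay set until the mm is reset;
                 * clearing it here is exactly the bug this patch fixes.
                 */
        } else {
                /* The noasid path flushes the local TLB, so prev really is gone. */
                prev->cpumask[cpu] = false;
        }
}

int main(void)
{
        struct mm a = { .cpumask = { true } }, b = { 0 };

        set_mm(&a, &b, 0, true);
        printf("asid: prev still tracked on cpu0: %d\n", a.cpumask[0]);
        set_mm(&a, &b, 0, false);
        printf("noasid: prev still tracked on cpu0: %d\n", a.cpumask[0]);
        return 0;
}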
28 changes: 17 additions & 11 deletions arch/riscv/mm/tlbflush.c
@@ -5,7 +5,23 @@
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+        __asm__ __volatile__ ("sfence.vma x0, %0"
+                        :
+                        : "r" (asid)
+                        : "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+                unsigned long asid)
+{
+        __asm__ __volatile__ ("sfence.vma %0, %1"
+                        :
+                        : "r" (addr), "r" (asid)
+                        : "memory");
+}

void flush_tlb_all(void)
{
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
unsigned long size, unsigned long stride)
{
-        struct cpumask *pmask = &mm->context.tlb_stale_mask;
struct cpumask *cmask = mm_cpumask(mm);
struct cpumask hmask;
unsigned int cpuid;
@@ -30,15 +45,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
if (static_branch_unlikely(&use_asid_allocator)) {
unsigned long asid = atomic_long_read(&mm->context.id);

-                /*
-                 * TLB will be immediately flushed on harts concurrently
-                 * executing this MM context. TLB flush on other harts
-                 * is deferred until this MM context migrates there.
-                 */
-                cpumask_setall(pmask);
-                cpumask_clear_cpu(cpuid, pmask);
-                cpumask_andnot(pmask, pmask, cmask);
-
if (broadcast) {
riscv_cpuid_to_hartid_mask(cmask, &hmask);
sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
8 changes: 8 additions & 0 deletions arch/s390/boot/ipl_report.c
@@ -57,11 +57,19 @@ static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
intersects(initrd_data.start, initrd_data.size, safe_addr, size))
safe_addr = initrd_data.start + initrd_data.size;
+        if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
+                safe_addr = (unsigned long)comps + comps->len;
+                goto repeat;
+        }
for_each_rb_entry(comp, comps)
if (intersects(safe_addr, size, comp->addr, comp->len)) {
safe_addr = comp->addr + comp->len;
goto repeat;
}
+        if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
+                safe_addr = (unsigned long)certs + certs->len;
+                goto repeat;
+        }
for_each_rb_entry(cert, certs)
if (intersects(safe_addr, size, cert->addr, cert->len)) {
safe_addr = cert->addr + cert->len;
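
Note: the two added checks extend the existing intersects-and-retry pattern to the component and certificate tables themselves, not just the entries they list. A self-contained sketch of that pattern — the overlap test is simplified to half-open intervals, and the range data is hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, len; };

/* Simplified overlap test standing in for the kernel's intersects(). */
static bool intersects(unsigned long a_start, unsigned long a_len,
                       unsigned long b_start, unsigned long b_len)
{
        return a_start < b_start + b_len && b_start < a_start + a_len;
}

/* Bump safe_addr past every reserved range it overlaps, restarting the
 * scan after each bump -- the repeat loop the new checks join. */
static unsigned long find_safe(unsigned long safe_addr, unsigned long size,
                               const struct range *resv, int n)
{
repeat:
        for (int i = 0; i < n; i++) {
                if (intersects(safe_addr, size, resv[i].start, resv[i].len)) {
                        safe_addr = resv[i].start + resv[i].len;
                        goto repeat;
                }
        }
        return safe_addr;
}

int main(void)
{
        struct range resv[] = { { 0x1000, 0x2000 }, { 0x3000, 0x1000 } };

        printf("safe @ 0x%lx\n", find_safe(0x1800, 0x800, resv, 2));
        return 0;
}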
16 changes: 10 additions & 6 deletions arch/s390/pci/pci.c
@@ -503,8 +503,7 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
return r;
}

-int zpci_setup_bus_resources(struct zpci_dev *zdev,
-                             struct list_head *resources)
+int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
unsigned long addr, size, flags;
struct resource *res;
@@ -540,7 +539,6 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
return -ENOMEM;
}
zdev->bars[i].res = res;
-                pci_add_resource(resources, res);
}
zdev->has_resources = 1;

@@ -549,17 +547,23 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
+        struct resource *res;
        int i;

+        pci_lock_rescan_remove();
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
-                if (!zdev->bars[i].size || !zdev->bars[i].res)
+                res = zdev->bars[i].res;
+                if (!res)
                        continue;

+                release_resource(res);
+                pci_bus_remove_resource(zdev->zbus->bus, res);
                zpci_free_iomap(zdev, zdev->bars[i].map_idx);
-                release_resource(zdev->bars[i].res);
-                kfree(zdev->bars[i].res);
+                zdev->bars[i].res = NULL;
+                kfree(res);
}
zdev->has_resources = 0;
+        pci_unlock_rescan_remove();
}

int pcibios_add_device(struct pci_dev *pdev)
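
Note: the reworked teardown detaches each BAR resource from the bus before releasing and freeing it, so the bus never keeps a pointer to freed memory. A userspace model of that ordering — bus_remove_resource() and the structures here are illustrative stand-ins, not the s390 PCI API:

#include <stdio.h>
#include <stdlib.h>

#define NUM_BARS 6

struct resource { unsigned long start, end; };

struct bus {
        struct resource *res[NUM_BARS];  /* resources the bus still references */
};

/* Stand-in for pci_bus_remove_resource(): drop the bus's reference. */
static void bus_remove_resource(struct bus *b, struct resource *r)
{
        for (int i = 0; i < NUM_BARS; i++)
                if (b->res[i] == r)
                        b->res[i] = NULL;
}

static void cleanup_bus_resources(struct bus *b, struct resource **bars)
{
        for (int i = 0; i < NUM_BARS; i++) {
                struct resource *res = bars[i];

                if (!res)
                        continue;
                /* Detach from the bus before freeing: afterwards nothing can
                 * reach the stale pointer, closing the use-after-free. */
                bus_remove_resource(b, res);
                bars[i] = NULL;
                free(res);
        }
}

int main(void)
{
        struct resource **bars = calloc(NUM_BARS, sizeof(*bars));
        struct bus b = { 0 };

        bars[0] = malloc(sizeof(struct resource));
        b.res[0] = bars[0];
        cleanup_bus_resources(&b, bars);
        printf("bus slot 0 after cleanup: %p\n", (void *)b.res[0]);
        free(bars);
        return 0;
}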
12 changes: 5 additions & 7 deletions arch/s390/pci/pci_bus.c
@@ -41,9 +41,7 @@ static int zpci_nb_devices;
*/
static int zpci_bus_prepare_device(struct zpci_dev *zdev)
{
-        struct resource_entry *window, *n;
-        struct resource *res;
-        int rc;
+        int rc, i;

if (!zdev_enabled(zdev)) {
rc = zpci_enable_device(zdev);
@@ -57,10 +55,10 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
}

if (!zdev->has_resources) {
-                zpci_setup_bus_resources(zdev, &zdev->zbus->resources);
-                resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) {
-                        res = window->res;
-                        pci_bus_add_resource(zdev->zbus->bus, res, 0);
+                zpci_setup_bus_resources(zdev);
+                for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+                        if (zdev->bars[i].res)
+                                pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0);
}
}

3 changes: 1 addition & 2 deletions arch/s390/pci/pci_bus.h
@@ -30,8 +30,7 @@ static inline void zpci_zdev_get(struct zpci_dev *zdev)

int zpci_alloc_domain(int domain);
void zpci_free_domain(int domain);
-int zpci_setup_bus_resources(struct zpci_dev *zdev,
-                             struct list_head *resources);
+int zpci_setup_bus_resources(struct zpci_dev *zdev);

static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
unsigned int devfn)
1 change: 1 addition & 0 deletions arch/x86/kernel/cpu/mce/core.c
@@ -2302,6 +2302,7 @@ static void mce_restart(void)
{
mce_timer_delete_all();
on_each_cpu(mce_cpu_restart, NULL, 1);
+        mce_schedule_work();
}

/* Toggle features for corrected errors */
7 changes: 2 additions & 5 deletions arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -353,7 +353,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
{
struct resctrl_schema *s;
struct rdtgroup *rdtgrp;
-        struct rdt_domain *dom;
struct rdt_resource *r;
char *tok, *resname;
int ret = 0;
@@ -382,10 +381,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
goto out;
}

-        list_for_each_entry(s, &resctrl_schema_all, list) {
-                list_for_each_entry(dom, &s->res->domains, list)
-                        memset(dom->staged_config, 0, sizeof(dom->staged_config));
-        }
+        rdt_staged_configs_clear();

while ((tok = strsep(&buf, "\n")) != NULL) {
resname = strim(strsep(&tok, ":"));
@@ -422,6 +418,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
}

out:
+        rdt_staged_configs_clear();
rdtgroup_kn_unlock(of->kn);
cpus_read_unlock();
return ret ?: nbytes;
1 change: 1 addition & 0 deletions arch/x86/kernel/cpu/resctrl/internal.h
@@ -550,5 +550,6 @@ bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
void __check_limbo(struct rdt_domain *d, bool force_free);
void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
void __init thread_throttle_mode_init(void);
+void rdt_staged_configs_clear(void);

#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
25 changes: 21 additions & 4 deletions arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -78,6 +78,19 @@ void rdt_last_cmd_printf(const char *fmt, ...)
va_end(ap);
}

+void rdt_staged_configs_clear(void)
+{
+        struct rdt_resource *r;
+        struct rdt_domain *dom;
+
+        lockdep_assert_held(&rdtgroup_mutex);
+
+        for_each_alloc_capable_rdt_resource(r) {
+                list_for_each_entry(dom, &r->domains, list)
+                        memset(dom->staged_config, 0, sizeof(dom->staged_config));
+        }
+}
+

/*
* Trivial allocator for CLOSIDs. Since h/w only supports a small number,
* we can keep a bitmap of free CLOSIDs in a single integer.
@@ -2813,7 +2826,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
struct resctrl_schema *s;
struct rdt_resource *r;
-        int ret;
+        int ret = 0;
+
+        rdt_staged_configs_clear();

list_for_each_entry(s, &resctrl_schema_all, list) {
r = s->res;
Expand All @@ -2822,20 +2837,22 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
} else {
ret = rdtgroup_init_cat(s, rdtgrp->closid);
if (ret < 0)
-                                return ret;
+                                goto out;
}

ret = resctrl_arch_update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("Failed to initialize allocations\n");
-                        return ret;
+                        goto out;
}

}

rdtgrp->mode = RDT_MODE_SHAREABLE;

-        return 0;
+out:
+        rdt_staged_configs_clear();
+        return ret;
}

static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
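
Note: both resctrl hunks enforce one invariant — staged_config is scratch space that is wiped before use and again on every exit path, so a failed schemata write cannot leak staged state into the next operation. A minimal sketch of that pattern outside the kernel; the domain data and function names here are hypothetical:

#include <stdio.h>
#include <string.h>

struct domain {
        int staged_config[4];   /* scratch space for a pending schema write */
};

static struct domain doms[2];

/* Counterpart of rdt_staged_configs_clear() in this toy model. */
static void staged_configs_clear(void)
{
        for (int i = 0; i < 2; i++)
                memset(doms[i].staged_config, 0, sizeof(doms[i].staged_config));
}

static int apply_schemata(const char *buf)
{
        int ret = 0;

        staged_configs_clear();         /* start from a clean slate */

        if (!buf) {                     /* parse failure: bail out */
                ret = -1;
                goto out;
        }
        doms[0].staged_config[0] = 1;   /* ...stage parsed values... */
        /* ...commit staged values here... */
out:
        staged_configs_clear();         /* never leak staged state to the next caller */
        return ret;
}

int main(void)
{
        apply_schemata(NULL);
        printf("staged after failed write: %d\n", doms[0].staged_config[0]);
        return 0;
}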
