Merge tag 'v5.14.8' into 5.14
This is the 5.14.8 stable release
xanmod committed Sep 26, 2021
2 parents bf3b422 + c34892e commit b772e1d
Showing 113 changed files with 971 additions and 466 deletions.
2 changes: 1 addition & 1 deletion Documentation/driver-api/cxl/memory-devices.rst
@@ -36,7 +36,7 @@ CXL Core
 .. kernel-doc:: drivers/cxl/cxl.h
    :internal:
 
-.. kernel-doc:: drivers/cxl/core.c
+.. kernel-doc:: drivers/cxl/core/bus.c
    :doc: cxl core
 
 External Interfaces
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 14
-SUBLEVEL = 7
+SUBLEVEL = 8
 EXTRAVERSION =
 NAME = Opossums on Parade
 
7 changes: 2 additions & 5 deletions arch/arm64/kernel/cacheinfo.c
@@ -43,7 +43,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
 	this_leaf->type = type;
 }
 
-static int __init_cache_level(unsigned int cpu)
+int init_cache_level(unsigned int cpu)
 {
 	unsigned int ctype, level, leaves, fw_level;
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -78,7 +78,7 @@ static int __init_cache_level(unsigned int cpu)
 	return 0;
 }
 
-static int __populate_cache_leaves(unsigned int cpu)
+int populate_cache_leaves(unsigned int cpu)
 {
 	unsigned int level, idx;
 	enum cache_type type;
@@ -97,6 +97,3 @@ static int __populate_cache_leaves(unsigned int cpu)
 	}
 	return 0;
 }
-
-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
16 changes: 15 additions & 1 deletion arch/arm64/mm/init.c
@@ -319,7 +319,21 @@ static void __init fdt_enforce_memory_region(void)
 
 void __init arm64_memblock_init(void)
 {
-	const s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
+	s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
+
+	/*
+	 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
+	 * be limited in their ability to support a linear map that exceeds 51
+	 * bits of VA space, depending on the placement of the ID map. Given
+	 * that the placement of the ID map may be randomized, let's simply
+	 * limit the kernel's linear map to 51 bits as well if we detect this
+	 * configuration.
+	 */
+	if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
+	    is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
+		pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
+		linear_region_size = min_t(u64, linear_region_size, BIT(51));
+	}
 
 	/* Handle linux,usable-memory-range property */
 	fdt_enforce_memory_region();
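As a quick illustration of the capping arithmetic above: BIT(51) is 2^51 bytes and min_t() clamps the region size down to it. A standalone C sketch with the two kernel macros modeled locally (values are illustrative, not the kernel's actual region sizes):

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's BIT() and min_t() macros. */
#define BIT(n)		(1ULL << (n))
#define min_t(t, a, b)	((t)(a) < (t)(b) ? (t)(a) : (t)(b))

int main(void)
{
	int64_t linear_region_size = BIT(52);	/* full 52-bit VA span */

	/* Cap to 51 bits, as the nVHE corner case above does. */
	linear_region_size = min_t(uint64_t, linear_region_size, BIT(51));
	printf("capped size: %#llx\n", (unsigned long long)linear_region_size);
	return 0;
}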
7 changes: 2 additions & 5 deletions arch/mips/kernel/cacheinfo.c
@@ -17,7 +17,7 @@ do {						\
 		leaf++;					\
 } while (0)
 
-static int __init_cache_level(unsigned int cpu)
+int init_cache_level(unsigned int cpu)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -74,7 +74,7 @@ static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
 			cpumask_set_cpu(cpu1, cpu_map);
 }
 
-static int __populate_cache_leaves(unsigned int cpu)
+int populate_cache_leaves(unsigned int cpu)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -114,6 +114,3 @@ static int __populate_cache_leaves(unsigned int cpu)
 
 	return 0;
 }
-
-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
6 changes: 5 additions & 1 deletion arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
@@ -16,10 +16,14 @@
 
 	aliases {
 		ethernet0 = &emac1;
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
 	};
 
 	chosen {
-		stdout-path = &serial0;
+		stdout-path = "serial0:115200n8";
 	};
 
 	cpus {
7 changes: 2 additions & 5 deletions arch/riscv/kernel/cacheinfo.c
@@ -113,7 +113,7 @@ static void fill_cacheinfo(struct cacheinfo **this_leaf,
 	}
 }
 
-static int __init_cache_level(unsigned int cpu)
+int init_cache_level(unsigned int cpu)
 {
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	struct device_node *np = of_cpu_device_node_get(cpu);
@@ -155,7 +155,7 @@ static int __init_cache_level(unsigned int cpu)
 	return 0;
 }
 
-static int __populate_cache_leaves(unsigned int cpu)
+int populate_cache_leaves(unsigned int cpu)
 {
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
@@ -187,6 +187,3 @@ static int __populate_cache_leaves(unsigned int cpu)
 
 	return 0;
 }
-
-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
20 changes: 10 additions & 10 deletions arch/s390/include/asm/stacktrace.h
@@ -34,16 +34,6 @@ static inline bool on_stack(struct stack_info *info,
 	return addr >= info->begin && addr + len <= info->end;
 }
 
-static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
-							struct pt_regs *regs)
-{
-	if (regs)
-		return (unsigned long) kernel_stack_pointer(regs);
-	if (task == current)
-		return current_stack_pointer();
-	return (unsigned long) task->thread.ksp;
-}
-
 /*
  * Stack layout of a C stack frame.
  */
@@ -74,6 +64,16 @@ struct stack_frame {
 	((unsigned long)__builtin_frame_address(0) -			\
 	 offsetof(struct stack_frame, back_chain))
 
+static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
+						       struct pt_regs *regs)
+{
+	if (regs)
+		return (unsigned long)kernel_stack_pointer(regs);
+	if (task == current)
+		return current_frame_address();
+	return (unsigned long)task->thread.ksp;
+}
+
 /*
  * To keep this simple mark register 2-6 as being changed (volatile)
  * by the called function, even though register 6 is saved/nonvolatile.
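The relocated helper now calls current_frame_address(), which the hunk above builds from __builtin_frame_address(0) minus the back_chain offset, so the function had to move below that macro's definition. A minimal standalone sketch of the GCC builtin involved (frame layout is architecture-specific; the printed value is illustrative):

#include <stdio.h>

/* noinline so the function keeps a stack frame of its own. */
static __attribute__((noinline)) void show_frame(void)
{
	/* Address of this function's frame, the raw value that
	 * current_frame_address() adjusts by the back_chain offset. */
	void *fp = __builtin_frame_address(0);

	printf("frame address: %p\n", fp);
}

int main(void)
{
	show_frame();
	return 0;
}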
8 changes: 4 additions & 4 deletions arch/s390/include/asm/unwind.h
@@ -55,10 +55,10 @@ static inline bool unwind_error(struct unwind_state *state)
 	return state->error;
 }
 
-static inline void unwind_start(struct unwind_state *state,
-				struct task_struct *task,
-				struct pt_regs *regs,
-				unsigned long first_frame)
+static __always_inline void unwind_start(struct unwind_state *state,
+					 struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned long first_frame)
 {
 	task = task ?: current;
 	first_frame = first_frame ?: get_stack_pointer(task, regs);
4 changes: 2 additions & 2 deletions arch/s390/kernel/entry.S
@@ -140,10 +140,10 @@ _LPP_OFFSET = __LC_LPP
 	TSTMSK	__LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
 	jnz	\errlabel
 	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
-	jz	oklabel\@
+	jz	.Loklabel\@
 	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
 	jnz	\errlabel
-oklabel\@:
+.Loklabel\@:
 	.endm
 
 #if IS_ENABLED(CONFIG_KVM)
10 changes: 7 additions & 3 deletions arch/s390/kernel/setup.c
@@ -50,6 +50,7 @@
 #include <linux/compat.h>
 #include <linux/start_kernel.h>
 #include <linux/hugetlb.h>
+#include <linux/kmemleak.h>
 
 #include <asm/boot_data.h>
 #include <asm/ipl.h>
@@ -312,9 +313,12 @@ void *restart_stack;
 unsigned long stack_alloc(void)
 {
 #ifdef CONFIG_VMAP_STACK
-	return (unsigned long)__vmalloc_node(THREAD_SIZE, THREAD_SIZE,
-			THREADINFO_GFP, NUMA_NO_NODE,
-			__builtin_return_address(0));
+	void *ret;
+
+	ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
+			     NUMA_NO_NODE, __builtin_return_address(0));
+	kmemleak_not_leak(ret);
+	return (unsigned long)ret;
 #else
 	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
 #endif
4 changes: 3 additions & 1 deletion arch/um/drivers/virtio_uml.c
@@ -1139,7 +1139,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
 		rc = os_connect_socket(pdata->socket_path);
 	} while (rc == -EINTR);
 	if (rc < 0)
-		return rc;
+		goto error_free;
 	vu_dev->sock = rc;
 
 	spin_lock_init(&vu_dev->sock_lock);
@@ -1160,6 +1160,8 @@ static int virtio_uml_probe(struct platform_device *pdev)
 
 error_init:
 	os_close_file(vu_dev->sock);
+error_free:
+	kfree(vu_dev);
 	return rc;
 }
 
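The fix above is the usual goto-unwind pattern: once vu_dev is allocated, every later failure path must exit through a label that frees it, instead of returning directly and leaking the allocation. A standalone sketch of the pattern; connect_socket, init_device, and close_socket are hypothetical stand-ins, not the driver's functions:

#include <stdio.h>
#include <stdlib.h>

struct dev { int sock; };

/* Hypothetical stand-ins for the driver's helpers. */
static int connect_socket(void)		{ return 3; }
static int init_device(struct dev *d)	{ (void)d; return 0; }
static void close_socket(int sock)	{ (void)sock; }

static int probe(void)
{
	struct dev *d = malloc(sizeof(*d));
	int rc;

	if (!d)
		return -1;

	rc = connect_socket();
	if (rc < 0)
		goto error_free;	/* the fix: "return rc;" here leaked d */
	d->sock = rc;

	rc = init_device(d);
	if (rc < 0)
		goto error_close;
	return 0;			/* on success the device stays allocated */

error_close:
	close_socket(d->sock);
error_free:
	free(d);
	return rc;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}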
3 changes: 1 addition & 2 deletions arch/um/kernel/skas/clone.c
@@ -24,8 +24,7 @@
 void __attribute__ ((__section__ (".__syscall_stub")))
 stub_clone_handler(void)
 {
-	int stack;
-	struct stub_data *data = (void *) ((unsigned long)&stack & ~(UM_KERN_PAGE_SIZE - 1));
+	struct stub_data *data = get_stub_page();
 	long err;
 
 	err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD,
7 changes: 2 additions & 5 deletions arch/x86/kernel/cpu/cacheinfo.c
@@ -985,7 +985,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
 	this_leaf->priv = base->nb;
 }
 
-static int __init_cache_level(unsigned int cpu)
+int init_cache_level(unsigned int cpu)
 {
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 
@@ -1014,7 +1014,7 @@ static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
 	id4_regs->id = c->apicid >> index_msb;
 }
 
-static int __populate_cache_leaves(unsigned int cpu)
+int populate_cache_leaves(unsigned int cpu)
 {
 	unsigned int idx, ret;
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -1033,6 +1033,3 @@ static int __populate_cache_leaves(unsigned int cpu)
 
 	return 0;
 }
-
-DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
-DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
12 changes: 12 additions & 0 deletions arch/x86/um/shared/sysdep/stub_32.h
@@ -101,4 +101,16 @@ static inline void remap_stack_and_trap(void)
 		"memory");
 }
 
+static __always_inline void *get_stub_page(void)
+{
+	unsigned long ret;
+
+	asm volatile (
+		"movl %%esp,%0 ;"
+		"andl %1,%0"
+		: "=a" (ret)
+		: "g" (~(UM_KERN_PAGE_SIZE - 1)));
+
+	return (void *)ret;
+}
 #endif
#endif
12 changes: 12 additions & 0 deletions arch/x86/um/shared/sysdep/stub_64.h
@@ -108,4 +108,16 @@ static inline void remap_stack_and_trap(void)
 		__syscall_clobber, "r10", "r8", "r9");
 }
 
+static __always_inline void *get_stub_page(void)
+{
+	unsigned long ret;
+
+	asm volatile (
+		"movq %%rsp,%0 ;"
+		"andq %1,%0"
+		: "=a" (ret)
+		: "g" (~(UM_KERN_PAGE_SIZE - 1)));
+
+	return (void *)ret;
+}
 #endif
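The new get_stub_page() helpers (and the callers in clone.c and stub_segv.c below) all rely on one idiom: the stub's data page is the page the stack pointer currently sits in, so masking off the low page-offset bits of any stack address yields its base. A standalone C sketch of that masking, assuming a 4 KiB page size for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size, for illustration */

int main(void)
{
	int anchor;	/* any local's address lies within the stack page */
	uintptr_t base = (uintptr_t)&anchor & ~(PAGE_SIZE - 1);

	printf("stack address: %p\n", (void *)&anchor);
	printf("page base:     %#lx\n", (unsigned long)base);
	return 0;
}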
3 changes: 1 addition & 2 deletions arch/x86/um/stub_segv.c
@@ -11,9 +11,8 @@
 void __attribute__ ((__section__ (".__syscall_stub")))
 stub_segv_handler(int sig, siginfo_t *info, void *p)
 {
-	int stack;
+	struct faultinfo *f = get_stub_page();
 	ucontext_t *uc = p;
-	struct faultinfo *f = (void *)(((unsigned long)&stack) & ~(UM_KERN_PAGE_SIZE - 1));
 
 	GET_FAULTINFO_FROM_MC(*f, &uc->uc_mcontext);
 	trap_myself();
14 changes: 13 additions & 1 deletion block/blk-mq.c
@@ -2135,6 +2135,18 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	}
 }
 
+/*
+ * Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
+ * queues. This is important for md arrays to benefit from merging
+ * requests.
+ */
+static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
+{
+	if (plug->multiple_queues)
+		return BLK_MAX_REQUEST_COUNT * 4;
+	return BLK_MAX_REQUEST_COUNT;
+}
+
 /**
  * blk_mq_submit_bio - Create and send a request to block device.
  * @bio: Bio pointer.
@@ -2231,7 +2243,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 		else
 			last = list_entry_rq(plug->mq_list.prev);
 
-		if (request_count >= blk_plug_max_rq_count(plug) || (last &&
+		if (request_count >= blk_plug_max_rq_count(plug) || (last &&
 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
 			blk_flush_plug_list(plug, false);
 			trace_block_plug(q);
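The helper added above only changes the flush threshold: a plug spanning multiple queues (md arrays being the motivating case) may now hold four times as many requests before blk_flush_plug_list() runs, giving the scheduler more merge candidates. A standalone model of that threshold logic; the struct here is a stand-in, and the BLK_MAX_REQUEST_COUNT value is assumed for illustration:

#include <stdbool.h>
#include <stdio.h>

#define BLK_MAX_REQUEST_COUNT 16	/* assumed value, for illustration */

/* Stand-in for the kernel's struct blk_plug; only the field used here. */
struct blk_plug { bool multiple_queues; };

static unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 4;
	return BLK_MAX_REQUEST_COUNT;
}

int main(void)
{
	struct blk_plug single = { .multiple_queues = false };
	struct blk_plug multi = { .multiple_queues = true };

	printf("single-queue flush threshold: %u\n", blk_plug_max_rq_count(&single));
	printf("multi-queue flush threshold:  %u\n", blk_plug_max_rq_count(&multi));
	return 0;
}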
1 change: 1 addition & 0 deletions block/blk-throttle.c
@@ -2458,6 +2458,7 @@ int blk_throtl_init(struct request_queue *q)
 void blk_throtl_exit(struct request_queue *q)
 {
 	BUG_ON(!q->td);
+	del_timer_sync(&q->td->service_queue.pending_timer);
 	throtl_shutdown_wq(q);
 	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
 	free_percpu(q->td->latency_buckets[READ]);
