Merge tag 'v5.14.10' into 5.14
This is the 5.14.10 stable release
xanmod committed Oct 7, 2021
2 parents 9bb7728 + b133f07 commit bad2461
Showing 188 changed files with 1,839 additions and 861 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 14
-SUBLEVEL = 9
+SUBLEVEL = 10
EXTRAVERSION =
NAME = Opossums on Parade

2 changes: 2 additions & 0 deletions arch/m68k/kernel/entry.S
@@ -186,6 +186,8 @@ ENTRY(ret_from_signal)
movel %curptr@(TASK_STACK),%a1
tstb %a1@(TINFO_FLAGS+2)
jge 1f
+lea %sp@(SWITCH_STACK_SIZE),%a1
+movel %a1,%curptr@(TASK_THREAD+THREAD_ESP0)
jbsr syscall_trace
1: RESTORE_SWITCH_STACK
addql #4,%sp
57 changes: 43 additions & 14 deletions arch/mips/net/bpf_jit.c
@@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx)
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
func##_positive)

+static bool is_bad_offset(int b_off)
+{
+return b_off > 0x1ffff || b_off < -0x20000;
+}
+
static int build_body(struct jit_ctx *ctx)
{
const struct bpf_prog *prog = ctx->skf;
@@ -728,7 +733,10 @@ static int build_body(struct jit_ctx *ctx)
/* Load return register on DS for failures */
emit_reg_move(r_ret, r_zero, ctx);
/* Return with error */
-emit_b(b_imm(prog->len, ctx), ctx);
+b_off = b_imm(prog->len, ctx);
+if (is_bad_offset(b_off))
+return -E2BIG;
+emit_b(b_off, ctx);
emit_nop(ctx);
break;
case BPF_LD | BPF_W | BPF_IND:
@@ -775,8 +783,10 @@ static int build_body(struct jit_ctx *ctx)
emit_jalr(MIPS_R_RA, r_s0, ctx);
emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
/* Check the error value */
-emit_bcond(MIPS_COND_NE, r_ret, 0,
-b_imm(prog->len, ctx), ctx);
+b_off = b_imm(prog->len, ctx);
+if (is_bad_offset(b_off))
+return -E2BIG;
+emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
emit_reg_move(r_ret, r_zero, ctx);
/* We are good */
/* X <- P[1:K] & 0xf */
@@ -855,17 +865,21 @@ static int build_body(struct jit_ctx *ctx)
/* A /= X */
ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */
-emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-b_imm(prog->len, ctx), ctx);
+b_off = b_imm(prog->len, ctx);
+if (is_bad_offset(b_off))
+return -E2BIG;
+emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_div(r_A, r_X, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_X:
/* A %= X */
ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */
-emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-b_imm(prog->len, ctx), ctx);
+b_off = b_imm(prog->len, ctx);
+if (is_bad_offset(b_off))
+return -E2BIG;
+emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_mod(r_A, r_X, ctx);
break;
@@ -926,7 +940,10 @@ static int build_body(struct jit_ctx *ctx)
break;
case BPF_JMP | BPF_JA:
/* pc += K */
-emit_b(b_imm(i + k + 1, ctx), ctx);
+b_off = b_imm(i + k + 1, ctx);
+if (is_bad_offset(b_off))
+return -E2BIG;
+emit_b(b_off, ctx);
emit_nop(ctx);
break;
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -1056,12 +1073,16 @@ static int build_body(struct jit_ctx *ctx)
break;
case BPF_RET | BPF_A:
ctx->flags |= SEEN_A;
-if (i != prog->len - 1)
+if (i != prog->len - 1) {
/*
* If this is not the last instruction
* then jump to the epilogue
*/
-emit_b(b_imm(prog->len, ctx), ctx);
+b_off = b_imm(prog->len, ctx);
+if (is_bad_offset(b_off))
+return -E2BIG;
+emit_b(b_off, ctx);
+}
emit_reg_move(r_ret, r_A, ctx); /* delay slot */
break;
case BPF_RET | BPF_K:
@@ -1075,7 +1096,10 @@
* If this is not the last instruction
* then jump to the epilogue
*/
-emit_b(b_imm(prog->len, ctx), ctx);
+b_off = b_imm(prog->len, ctx);
+if (is_bad_offset(b_off))
+return -E2BIG;
+emit_b(b_off, ctx);
emit_nop(ctx);
}
break;
@@ -1133,8 +1157,10 @@ static int build_body(struct jit_ctx *ctx)
/* Load *dev pointer */
emit_load_ptr(r_s0, r_skb, off, ctx);
/* error (0) in the delay slot */
-emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
-b_imm(prog->len, ctx), ctx);
+b_off = b_imm(prog->len, ctx);
+if (is_bad_offset(b_off))
+return -E2BIG;
+emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
emit_reg_move(r_ret, r_zero, ctx);
if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
@@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp)

/* Generate the actual JIT code */
build_prologue(&ctx);
-build_body(&ctx);
+if (build_body(&ctx)) {
+module_memfree(ctx.target);
+goto out;
+}
build_epilogue(&ctx);

/* Update the icache */
3 changes: 2 additions & 1 deletion arch/nios2/Kconfig.debug
@@ -3,9 +3,10 @@
config EARLY_PRINTK
bool "Activate early kernel debugging"
default y
+depends on TTY
select SERIAL_CORE_CONSOLE
help
-Enable early printk on console
+Enable early printk on console.
This is useful for kernel debugging when your machine crashes very
early before the console code is initialized.
You should normally say N here, unless you want to debug such a crash.
2 changes: 0 additions & 2 deletions arch/nios2/kernel/setup.c
@@ -149,8 +149,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,

void __init setup_arch(char **cmdline_p)
{
-int dram_start;
-
console_verbose();

memory_start = memblock_start_of_DRAM();
2 changes: 1 addition & 1 deletion arch/s390/include/asm/ccwgroup.h
@@ -57,7 +57,7 @@ struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
char *bus_id);

extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
-extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv);

extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
2 changes: 1 addition & 1 deletion arch/x86/crypto/aesni-intel_glue.c
@@ -849,7 +849,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
return -EINVAL;

err = skcipher_walk_virt(&walk, req, false);
-if (err)
+if (!walk.nbytes)
return err;

if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
1 change: 1 addition & 0 deletions arch/x86/events/intel/core.c
@@ -263,6 +263,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+INTEL_EVENT_CONSTRAINT(0xef, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
EVENT_CONSTRAINT_END
};
2 changes: 1 addition & 1 deletion arch/x86/include/asm/kvm_page_track.h
@@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
struct kvm_page_track_notifier_node *node);
};

-void kvm_page_track_init(struct kvm *kvm);
+int kvm_page_track_init(struct kvm *kvm);
void kvm_page_track_cleanup(struct kvm *kvm);

void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
14 changes: 14 additions & 0 deletions arch/x86/include/asm/kvmclock.h
@@ -2,6 +2,20 @@
#ifndef _ASM_X86_KVM_CLOCK_H
#define _ASM_X86_KVM_CLOCK_H

+#include <linux/percpu.h>
+
extern struct clocksource kvm_clock;

+DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+return this_cpu_read(hv_clock_per_cpu);
+}
+
#endif /* _ASM_X86_KVM_CLOCK_H */
13 changes: 2 additions & 11 deletions arch/x86/kernel/kvmclock.c
@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
static struct pvclock_vsyscall_time_info
hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static struct pvclock_wall_clock wall_clock __bss_decrypted;
-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
static struct pvclock_vsyscall_time_info *hvclock_mem;
-
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
-{
-return &this_cpu_read(hv_clock_per_cpu)->pvti;
-}
-
-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
-{
-return this_cpu_read(hv_clock_per_cpu);
-}
+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);

/*
* The wallclock is the time of day when we booted. Since then, some time may
4 changes: 2 additions & 2 deletions arch/x86/kvm/cpuid.c
@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
for (i = 0; i < nent; i++) {
e = &entries[i];

-if (e->function == function && (e->index == index ||
-!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
+if (e->function == function &&
+(!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
return e;
}

1 change: 0 additions & 1 deletion arch/x86/kvm/emulate.c
@@ -435,7 +435,6 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
__FOP_RET(#op)

asm(".pushsection .fixup, \"ax\"\n"
-".global kvm_fastop_exception \n"
"kvm_fastop_exception: xor %esi, %esi; ret\n"
".popsection");

10 changes: 5 additions & 5 deletions arch/x86/kvm/ioapic.c
@@ -319,8 +319,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
unsigned index;
bool mask_before, mask_after;
union kvm_ioapic_redirect_entry *e;
-unsigned long vcpu_bitmap;
int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
+DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

switch (ioapic->ioregsel) {
case IOAPIC_REG_VERSION:
@@ -384,9 +384,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
irq.shorthand = APIC_DEST_NOSHORT;
irq.dest_id = e->fields.dest_id;
irq.msi_redir_hint = false;
-bitmap_zero(&vcpu_bitmap, 16);
+bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-&vcpu_bitmap);
+vcpu_bitmap);
if (old_dest_mode != e->fields.dest_mode ||
old_dest_id != e->fields.dest_id) {
/*
@@ -399,10 +399,10 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
kvm_lapic_irq_dest_mode(
!!e->fields.dest_mode);
kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-&vcpu_bitmap);
+vcpu_bitmap);
}
kvm_make_scan_ioapic_request_mask(ioapic->kvm,
-&vcpu_bitmap);
+vcpu_bitmap);
} else {
kvm_make_scan_ioapic_request(ioapic->kvm);
}
4 changes: 2 additions & 2 deletions arch/x86/kvm/mmu/page_track.c
@@ -163,13 +163,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
cleanup_srcu_struct(&head->track_srcu);
}

-void kvm_page_track_init(struct kvm *kvm)
+int kvm_page_track_init(struct kvm *kvm)
{
struct kvm_page_track_notifier_head *head;

head = &kvm->arch.track_notifier_head;
-init_srcu_struct(&head->track_srcu);
INIT_HLIST_HEAD(&head->track_notifier_list);
+return init_srcu_struct(&head->track_srcu);
}

/*
1 change: 0 additions & 1 deletion arch/x86/kvm/svm/nested.c
@@ -545,7 +545,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
(svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);

-svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
svm->vmcb->control.int_state = svm->nested.ctl.int_state;
svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
