Merge tag 'v5.10.28' into v5.10-rt
This is the 5.10.28 stable release

 Conflicts:
	kernel/trace/trace.c

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
rostedt committed Apr 16, 2021
2 parents c4c779e + ecdfb9d commit 083c744
Showing 126 changed files with 1,006 additions and 597 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 27
+SUBLEVEL = 28
 EXTRAVERSION =
 NAME = Dare mighty things

20 changes: 18 additions & 2 deletions arch/arm64/mm/mmu.c
@@ -1447,14 +1447,30 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 
 static bool inside_linear_region(u64 start, u64 size)
 {
+        u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+        u64 end_linear_pa = __pa(PAGE_END - 1);
+
+        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+                /*
+                 * Check for a wrap: with a randomized linear mapping, the
+                 * start physical address can end up bigger than the end
+                 * physical address. In that case, reset start to zero,
+                 * because the range [0, end_linear_pa] must still cover
+                 * all addressable physical addresses.
+                 */
+                if (start_linear_pa > end_linear_pa)
+                        start_linear_pa = 0;
+        }
+
+        WARN_ON(start_linear_pa > end_linear_pa);
+
         /*
         * The linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)],
         * accommodating both its ends but excluding PAGE_END. The maximum
         * physical range which can be mapped inside this linear mapping range
         * must also be derived from its end points.
         */
-        return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
-               (start + size - 1) <= __pa(PAGE_END - 1);
+        return start >= start_linear_pa && (start + size - 1) <= end_linear_pa;
 }
 
 int arch_add_memory(int nid, u64 start, u64 size,
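
The wrap handling above is easy to sanity-check in isolation. Here is a minimal user-space sketch of the same range check; inside_range() and the sample addresses are hypothetical stand-ins, since __pa(), _PAGE_OFFSET() and PAGE_END exist only in the kernel:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as inside_linear_region(): clamp the start on wrap-around. */
static bool inside_range(uint64_t start_linear_pa, uint64_t end_linear_pa,
                         uint64_t start, uint64_t size)
{
        if (start_linear_pa > end_linear_pa)    /* randomized mapping wrapped */
                start_linear_pa = 0;
        return start >= start_linear_pa &&
               (start + size - 1) <= end_linear_pa;
}

int main(void)
{
        /* Normal case: [0x2000, 0x2fff] lies inside [0x1000, 0xffff]. */
        assert(inside_range(0x1000, 0xffff, 0x2000, 0x1000));

        /* Wrapped case: start > end, so the range is treated as [0, end]. */
        assert(inside_range(0xf000, 0x8000, 0x100, 0x100));
        assert(!inside_range(0xf000, 0x8000, 0x9000, 0x100));

        puts("ok");
        return 0;
}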
7 changes: 5 additions & 2 deletions arch/riscv/include/asm/uaccess.h
@@ -306,7 +306,9 @@ do { \
  * data types like structures or arrays.
  *
  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
+ * to the result of dereferencing @ptr. The value of @x is copied to avoid
+ * re-ordering where @x would be evaluated inside the block that enables
+ * user-space access (bypassing protection if @x is a function call).
  *
  * Caller must check the pointer with access_ok() before calling this
  * function.
@@ -316,12 +318,13 @@ do { \
 #define __put_user(x, ptr)                              \
 ({                                                      \
         __typeof__(*(ptr)) __user *__gu_ptr = (ptr);    \
+        __typeof__(*__gu_ptr) __val = (x);              \
         long __pu_err = 0;                              \
                                                         \
         __chk_user_ptr(__gu_ptr);                       \
                                                         \
         __enable_user_access();                         \
-        __put_user_nocheck(x, __gu_ptr, __pu_err);      \
+        __put_user_nocheck(__val, __gu_ptr, __pu_err);  \
         __disable_user_access();                        \
                                                         \
         __pu_err;                                       \
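
The hazard that the new __val copy removes is one of macro evaluation order: if @x expands to a function call inside the __enable_user_access()/__disable_user_access() window, that call runs with user-space access enabled. A toy user-space model of the two macro shapes (PUT_UNSAFE, PUT_SAFE, and the window functions are all hypothetical names):

#include <stdio.h>

static void enable_window(void)  { puts("  window open"); }
static void disable_window(void) { puts("  window closed"); }

/* Unsafe shape: (x) is evaluated between enable and disable. */
#define PUT_UNSAFE(x, ptr) do {                 \
        enable_window();                        \
        *(ptr) = (x);                           \
        disable_window();                       \
} while (0)

/* Fixed shape, mirroring the patch: evaluate (x) once, before the window. */
#define PUT_SAFE(x, ptr) do {                   \
        __typeof__(*(ptr)) __val = (x);         \
        enable_window();                        \
        *(ptr) = __val;                         \
        disable_window();                       \
} while (0)

static int noisy(void)
{
        puts("  evaluating x");
        return 42;
}

int main(void)
{
        int dst;

        puts("unsafe:");        /* "evaluating x" lands inside the window */
        PUT_UNSAFE(noisy(), &dst);

        puts("safe:");          /* "evaluating x" happens before the window */
        PUT_SAFE(noisy(), &dst);

        printf("dst=%d\n", dst);
        return 0;
}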
2 changes: 1 addition & 1 deletion arch/s390/include/asm/vdso/data.h
@@ -6,7 +6,7 @@
 #include <vdso/datapage.h>
 
 struct arch_vdso_data {
-        __u64 tod_steering_delta;
+        __s64 tod_steering_delta;
         __u64 tod_steering_end;
 };

1 change: 1 addition & 0 deletions arch/s390/kernel/time.c
@@ -398,6 +398,7 @@ static void clock_sync_global(unsigned long long delta)
                        tod_steering_delta);
         tod_steering_end = now + (abs(tod_steering_delta) << 15);
         vdso_data->arch_data.tod_steering_end = tod_steering_end;
+        vdso_data->arch_data.tod_steering_delta = tod_steering_delta;
 
         /* Update LPAR offset. */
         if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
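
The two s390 hunks are one fix: the steering delta may be negative, since the TOD clock can be steered backwards, so the value must both be copied into the vdso data page and be declared signed there. A small user-space illustration of the signedness pitfall (sample values only):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t tod_steering_delta = -5;   /* clock steered backwards */

        /* Stored in a __u64 field, the sign is silently lost: */
        uint64_t as_u64 = (uint64_t)tod_steering_delta;
        printf("as __u64: %" PRIu64 "\n", as_u64);  /* 18446744073709551611 */

        /* Stored in a __s64 field, arithmetic stays correct: */
        int64_t tod = 1000;
        printf("corrected: %" PRId64 "\n", tod - tod_steering_delta);  /* 1005 */

        return 0;
}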
1 change: 1 addition & 0 deletions arch/x86/include/asm/smp.h
@@ -132,6 +132,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
+bool wakeup_cpu0(void);
 
 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
25 changes: 12 additions & 13 deletions arch/x86/kernel/acpi/boot.c
@@ -1554,10 +1554,18 @@ void __init acpi_boot_table_init(void)
         /*
          * Initialize the ACPI boot-time table parser.
          */
-        if (acpi_table_init()) {
+        if (acpi_locate_initial_tables())
                 disable_acpi();
-                return;
-        }
+        else
+                acpi_reserve_initial_tables();
+}
+
+int __init early_acpi_boot_init(void)
+{
+        if (acpi_disabled)
+                return 1;
+
+        acpi_table_init_complete();
 
         acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
 
@@ -1570,18 +1578,9 @@ void __init acpi_boot_table_init(void)
                 } else {
                         printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
                         disable_acpi();
-                        return;
+                        return 1;
                 }
         }
-}
 
-int __init early_acpi_boot_init(void)
-{
-        /*
-         * If acpi_disabled, bail out
-         */
-        if (acpi_disabled)
-                return 1;
-
         /*
          * Process the Multiple APIC Description Table (MADT), if present
8 changes: 3 additions & 5 deletions arch/x86/kernel/setup.c
@@ -1051,6 +1051,9 @@ void __init setup_arch(char **cmdline_p)
 
         cleanup_highmap();
 
+        /* Look for ACPI tables and reserve memory occupied by them. */
+        acpi_boot_table_init();
+
         memblock_set_current_limit(ISA_END_ADDRESS);
         e820__memblock_setup();
 
@@ -1136,11 +1139,6 @@
 
         early_platform_quirks();
 
-        /*
-         * Parse the ACPI tables for possible boot-time SMP configuration.
-         */
-        acpi_boot_table_init();
-
         early_acpi_boot_init();
 
         initmem_init();
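
Taken together, the boot.c and setup.c hunks split ACPI table setup in two: the tables are located and their memory reserved before e820__memblock_setup() can hand those ranges out, while the rest of the parser initialization waits until early_acpi_boot_init(). A compilable sketch of the new ordering, with stubs standing in for the kernel functions and simplified return conventions:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the kernel functions; returns are simplified. */
static bool acpi_locate_initial_tables(void)
{
        puts("1. find ACPI tables in firmware memory");
        return false;                   /* false: success */
}
static void acpi_reserve_initial_tables(void)
{
        puts("2. reserve the ranges they occupy");
}
static void e820__memblock_setup(void)
{
        puts("3. hand remaining memory to memblock");
}
static void acpi_table_init_complete(void)
{
        puts("4. finish table-parser initialization");
}

static void acpi_boot_table_init(void)
{
        /* Early half: just locate and reserve, nothing more. */
        if (acpi_locate_initial_tables())
                puts("disable ACPI");
        else
                acpi_reserve_initial_tables();
}

static int early_acpi_boot_init(void)
{
        /* Later half: safe to finish initialization and parse MADT etc. */
        acpi_table_init_complete();
        return 0;
}

int main(void)
{
        /* New setup_arch() ordering: reserve before memblock setup. */
        acpi_boot_table_init();
        e820__memblock_setup();
        early_acpi_boot_init();
        return 0;
}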
2 changes: 1 addition & 1 deletion arch/x86/kernel/smpboot.c
@@ -1655,7 +1655,7 @@ void play_dead_common(void)
         local_irq_disable();
 }
 
-static bool wakeup_cpu0(void)
+bool wakeup_cpu0(void)
 {
         if (smp_processor_id() == 0 && enable_start_cpu0)
                 return true;
28 changes: 23 additions & 5 deletions arch/x86/kvm/svm/nested.c
@@ -246,11 +246,18 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
         return true;
 }
 
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
         struct kvm_vcpu *vcpu = &svm->vcpu;
         bool vmcb12_lma;
 
+        /*
+         * FIXME: these should be done after copying the fields,
+         * to avoid TOC/TOU races. For these save area checks
+         * the possible damage is limited, since kvm_set_cr0 and
+         * kvm_set_cr4 handle failure; EFER_SVME is an exception,
+         * so it is force-set later in nested_prepare_vmcb_save.
+         */
         if ((vmcb12->save.efer & EFER_SVME) == 0)
                 return false;
 
@@ -271,7 +278,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
         if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
                 return false;
 
-        return nested_vmcb_check_controls(&vmcb12->control);
+        return true;
 }
 
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -396,7 +403,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
         svm->vmcb->save.gdtr = vmcb12->save.gdtr;
         svm->vmcb->save.idtr = vmcb12->save.idtr;
         kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags);
-        svm_set_efer(&svm->vcpu, vmcb12->save.efer);
+
+        /*
+         * Force-set EFER_SVME even though it is checked earlier on the
+         * VMCB12, because the guest can flip the bit between the check
+         * and now. Clearing EFER_SVME would call svm_free_nested.
+         */
+        svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+
         svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
         svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
         svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
@@ -454,7 +468,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
         int ret;
 
         svm->nested.vmcb12_gpa = vmcb12_gpa;
-        load_nested_vmcb_control(svm, &vmcb12->control);
         nested_prepare_vmcb_save(svm, vmcb12);
         nested_prepare_vmcb_control(svm);
 
@@ -501,7 +514,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
         if (WARN_ON_ONCE(!svm->nested.initialized))
                 return -EINVAL;
 
-        if (!nested_vmcb_checks(svm, vmcb12)) {
+        load_nested_vmcb_control(svm, &vmcb12->control);
+
+        if (!nested_vmcb_check_save(svm, vmcb12) ||
+            !nested_vmcb_check_controls(&svm->nested.ctl)) {
                 vmcb12->control.exit_code    = SVM_EXIT_ERR;
                 vmcb12->control.exit_code_hi = 0;
                 vmcb12->control.exit_info_1  = 0;
@@ -1205,6 +1221,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
          */
         if (!(save->cr0 & X86_CR0_PG))
                 goto out_free;
+        if (!(save->efer & EFER_SVME))
+                goto out_free;
 
         /*
          * All checks done, we can enter guest mode. L1 control fields
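
The recurring pattern in these hunks is copy-then-check: the VMCB12 control area is copied into svm->nested.ctl before it is validated, so a guest that rewrites the in-memory VMCB after the check cannot influence what KVM actually uses. A generic user-space sketch of the racy shape and the fixed shape (the struct and field names are illustrative, not KVM's):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the VMCB12 control area. */
struct ctl_area {
        unsigned int intercepts;
        unsigned int asid;
};

/* Racy shape: validate guest-writable memory, then read it again later. */
static bool check_in_place(volatile struct ctl_area *guest)
{
        return guest->asid != 0;  /* guest may flip asid right after this */
}

/* Fixed shape: snapshot first; validate and use only the snapshot. */
static bool check_snapshot(volatile struct ctl_area *guest,
                           struct ctl_area *snap)
{
        memcpy(snap, (void *)guest, sizeof(*snap));
        return snap->asid != 0;   /* later code reads snap, never guest */
}

int main(void)
{
        struct ctl_area shared = { .intercepts = 0x1, .asid = 5 };
        struct ctl_area snap;

        if (check_in_place(&shared))
                puts("in-place check passed (but may be stale by now)");
        if (check_snapshot(&shared, &snap))
                printf("using validated snapshot, asid=%u\n", snap.asid);
        return 0;
}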
27 changes: 23 additions & 4 deletions arch/x86/net/bpf_jit_comp.c
@@ -1735,7 +1735,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
  * add rsp, 8                      // skip eth_type_trans's frame
  * ret                             // return to its caller
  */
-int arch_prepare_bpf_trampoline(void *image, void *image_end,
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
                                 const struct btf_func_model *m, u32 flags,
                                 struct bpf_tramp_progs *tprogs,
                                 void *orig_call)
@@ -1774,6 +1774,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 
         save_regs(m, &prog, nr_args, stack_size);
 
+        if (flags & BPF_TRAMP_F_CALL_ORIG) {
+                /* arg1: mov rdi, im */
+                emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+                if (emit_call(&prog, __bpf_tramp_enter, prog)) {
+                        ret = -EINVAL;
+                        goto cleanup;
+                }
+        }
+
         if (fentry->nr_progs)
                 if (invoke_bpf(m, &prog, fentry, stack_size))
                         return -EINVAL;
@@ -1792,8 +1801,7 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
         }
 
         if (flags & BPF_TRAMP_F_CALL_ORIG) {
-                if (fentry->nr_progs || fmod_ret->nr_progs)
-                        restore_regs(m, &prog, nr_args, stack_size);
+                restore_regs(m, &prog, nr_args, stack_size);
 
                 /* call original function */
                 if (emit_call(&prog, orig_call, prog)) {
@@ -1802,6 +1810,9 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
                 }
                 /* remember return value in a stack for bpf prog to access */
                 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+                im->ip_after_call = prog;
+                memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
+                prog += X86_PATCH_SIZE;
         }
 
         if (fmod_ret->nr_progs) {
@@ -1832,9 +1843,17 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
          * the return value is only updated on the stack and still needs to be
          * restored to R0.
          */
-        if (flags & BPF_TRAMP_F_CALL_ORIG)
+        if (flags & BPF_TRAMP_F_CALL_ORIG) {
+                im->ip_epilogue = prog;
+                /* arg1: mov rdi, im */
+                emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+                if (emit_call(&prog, __bpf_tramp_exit, prog)) {
+                        ret = -EINVAL;
+                        goto cleanup;
+                }
                 /* restore original return value back into RAX */
                 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+        }
 
         EMIT1(0x5B);  /* pop rbx */
         EMIT1(0xC9);  /* leave */
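
The new __bpf_tramp_enter/__bpf_tramp_exit calls bracket the call to the original function so that the trampoline image is refcounted and cannot be freed while it is still executing; ip_after_call and ip_epilogue record patch sites inside the image. A conceptual C model of the emitted control flow (every name below is a stand-in, not the kernel API):

#include <stdio.h>

/* Stand-in for struct bpf_tramp_image; the real one holds a percpu ref. */
struct tramp_image {
        int refcnt;
};

/* Models of __bpf_tramp_enter/__bpf_tramp_exit: pin the image while the
 * original function and fexit programs may still be running inside it. */
static void tramp_enter(struct tramp_image *im) { im->refcnt++; }
static void tramp_exit(struct tramp_image *im)  { im->refcnt--; }

static long original_fn(long a) { return a * 2; }

/* Conceptual flow of the code emitted for BPF_TRAMP_F_CALL_ORIG. */
static long trampoline(struct tramp_image *im, long arg)
{
        long ret;

        tramp_enter(im);          /* emitted right after save_regs() */
        /* ... fentry programs run, registers are restored ... */
        ret = original_fn(arg);   /* stored on the stack in the real JIT */
        /* ... fmod_ret/fexit programs run ... */
        tramp_exit(im);           /* emitted at ip_epilogue */
        return ret;               /* restored into RAX in the real JIT */
}

int main(void)
{
        struct tramp_image im = { 0 };
        long ret = trampoline(&im, 21);

        printf("ret=%ld refcnt=%d\n", ret, im.refcnt);  /* ret=42 refcnt=0 */
        return 0;
}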
64 changes: 33 additions & 31 deletions arch/xtensa/kernel/coprocessor.S
@@ -99,37 +99,6 @@
         LOAD_CP_REGS_TAB(6)
         LOAD_CP_REGS_TAB(7)
 
-/*
- * coprocessor_flush(struct thread_info*, index)
- *                             a2        a3
- *
- * Save coprocessor registers for coprocessor 'index'.
- * The register values are saved to or loaded from the coprocessor area
- * inside the task_info structure.
- *
- * Note that this function doesn't update the coprocessor_owner information!
- *
- */
-
-ENTRY(coprocessor_flush)
-
-        /* reserve 4 bytes on stack to save a0 */
-        abi_entry(4)
-
-        s32i    a0, a1, 0
-        movi    a0, .Lsave_cp_regs_jump_table
-        addx8   a3, a3, a0
-        l32i    a4, a3, 4
-        l32i    a3, a3, 0
-        add     a2, a2, a4
-        beqz    a3, 1f
-        callx0  a3
-1:      l32i    a0, a1, 0
-
-        abi_ret(4)
-
-ENDPROC(coprocessor_flush)
-
 /*
  * Entry condition:
  *
@@ -245,6 +214,39 @@ ENTRY(fast_coprocessor)
 
 ENDPROC(fast_coprocessor)
 
+        .text
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ *                             a2        a3
+ *
+ * Save coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that this function doesn't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_flush)
+
+        /* reserve 4 bytes on stack to save a0 */
+        abi_entry(4)
+
+        s32i    a0, a1, 0
+        movi    a0, .Lsave_cp_regs_jump_table
+        addx8   a3, a3, a0
+        l32i    a4, a3, 4
+        l32i    a3, a3, 0
+        add     a2, a2, a4
+        beqz    a3, 1f
+        callx0  a3
+1:      l32i    a0, a1, 0
+
+        abi_ret(4)
+
+ENDPROC(coprocessor_flush)
+
         .data
 
 ENTRY(coprocessor_owner)
5 changes: 4 additions & 1 deletion arch/xtensa/mm/fault.c
@@ -112,8 +112,11 @@ void do_page_fault(struct pt_regs *regs)
          */
         fault = handle_mm_fault(vma, address, flags, regs);
 
-        if (fault_signal_pending(fault, regs))
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        goto bad_page_fault;
                 return;
+        }
 
         if (unlikely(fault & VM_FAULT_ERROR)) {
                 if (fault & VM_FAULT_OOM)
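
The distinction matters because returning from a kernel-mode fault without running the exception fixup would re-execute the faulting instruction and fault again indefinitely once a fatal signal is pending. A toy model of the control flow (user_mode() and bad_page_fault() are stand-ins for the kernel helpers):

#include <stdbool.h>
#include <stdio.h>

enum fault_result { FAULT_OK, FAULT_SIGNAL_PENDING };

/* Stand-ins for the kernel helpers used by do_page_fault(). */
static bool user_mode(int regs)  { return regs != 0; }
static void bad_page_fault(void) { puts("  run exception fixup / oops"); }

static void handle_fault(enum fault_result fault, int regs)
{
        if (fault == FAULT_SIGNAL_PENDING) {
                /*
                 * A user thread handles the fatal signal on its way back
                 * out; a kernel-mode access must take the fixup path,
                 * because plainly returning would re-execute the faulting
                 * instruction and fault again, forever.
                 */
                if (!user_mode(regs))
                        bad_page_fault();
                return;
        }
        puts("  fault handled normally");
}

int main(void)
{
        puts("kernel mode, fatal signal pending:");
        handle_fault(FAULT_SIGNAL_PENDING, 0);
        puts("user mode, fatal signal pending:");
        handle_fault(FAULT_SIGNAL_PENDING, 1);
        puts("no signal:");
        handle_fault(FAULT_OK, 1);
        return 0;
}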
