Merge tag 'v5.10.30' into 5.10
This is the 5.10.30 stable release
xanmod committed Apr 14, 2021
2 parents e5617f0 + 1e79874 commit f98b36b
Showing 193 changed files with 1,827 additions and 861 deletions.
@@ -49,7 +49,7 @@ properties:
 description:
 Reference to an nvmem node for the MAC address

-nvmem-cells-names:
+nvmem-cell-names:
 const: mac-address

 phy-connection-type:

2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 29
+SUBLEVEL = 30
 EXTRAVERSION =
 NAME = Dare mighty things

1 change: 1 addition & 0 deletions arch/arm/boot/dts/armada-385-turris-omnia.dts
@@ -236,6 +236,7 @@
 status = "okay";
 compatible = "ethernet-phy-id0141.0DD1", "ethernet-phy-ieee802.3-c22";
 reg = <1>;
+marvell,reg-init = <3 18 0 0x4985>;

 /* irq is connected to &pcawan pin 7 */
 };

2 changes: 2 additions & 0 deletions arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -432,6 +432,7 @@
 pinctrl-0 = <&pinctrl_usdhc2>;
 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+vmmc-supply = <&vdd_sd1_reg>;
 status = "disabled";
 };

@@ -441,5 +442,6 @@
 &pinctrl_usdhc3_cdwp>;
 cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
 wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+vmmc-supply = <&vdd_sd0_reg>;
 status = "disabled";
 };

39 changes: 39 additions & 0 deletions arch/arm/mach-omap2/omap-secure.c
@@ -9,6 +9,7 @@
  */

 #include <linux/arm-smccc.h>
+#include <linux/cpu_pm.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -20,6 +21,7 @@

 #include "common.h"
 #include "omap-secure.h"
+#include "soc.h"

 static phys_addr_t omap_secure_memblock_base;

@@ -213,3 +215,40 @@ void __init omap_secure_init(void)
 {
         omap_optee_init_check();
 }
+
+/*
+ * Dummy dispatcher call after core OSWR and MPU off. Updates the ROM return
+ * address after MMU has been re-enabled after CPU1 has been woken up again.
+ * Otherwise the ROM code will attempt to use the earlier physical return
+ * address that got set with MMU off when waking up CPU1. Only used on secure
+ * devices.
+ */
+static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v)
+{
+        switch (cmd) {
+        case CPU_CLUSTER_PM_EXIT:
+                omap_secure_dispatcher(OMAP4_PPA_SERVICE_0,
+                                       FLAG_START_CRITICAL,
+                                       0, 0, 0, 0, 0);
+                break;
+        default:
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block secure_notifier_block = {
+        .notifier_call = cpu_notifier,
+};
+
+static int __init secure_pm_init(void)
+{
+        if (omap_type() == OMAP2_DEVICE_TYPE_GP || !soc_is_omap44xx())
+                return 0;
+
+        cpu_pm_register_notifier(&secure_notifier_block);
+
+        return 0;
+}
+omap_arch_initcall(secure_pm_init);
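
For context on where the new notifier actually fires: CPU_CLUSTER_PM_EXIT is delivered through the cpu_pm notifier chain by the platform's idle/suspend path. The sketch below is illustrative only and is not part of this patch; example_cluster_idle() is a hypothetical caller, while cpu_pm_enter(), cpu_cluster_pm_enter() and their exit counterparts are the real <linux/cpu_pm.h> API.

    /*
     * Illustrative sketch, not part of this patch: how the cpu_pm events
     * consumed by secure_notifier_block are generated. example_cluster_idle()
     * is a hypothetical name for the platform's last-CPU idle path.
     */
    #include <linux/cpu_pm.h>

    static void example_cluster_idle(void)
    {
            if (cpu_pm_enter())             /* CPU_PM_ENTER */
                    return;
            if (cpu_cluster_pm_enter())     /* CPU_CLUSTER_PM_ENTER */
                    goto out;

            /* ... enter the low-power state (core OSWR / MPU off) ... */

            cpu_cluster_pm_exit();          /* CPU_CLUSTER_PM_EXIT -> cpu_notifier() above */
    out:
            cpu_pm_exit();                  /* CPU_PM_EXIT */
    }
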
1 change: 1 addition & 0 deletions arch/arm/mach-omap2/omap-secure.h
@@ -50,6 +50,7 @@
 #define OMAP5_DRA7_MON_SET_ACR_INDEX 0x107

 /* Secure PPA(Primary Protected Application) APIs */
+#define OMAP4_PPA_SERVICE_0 0x21
 #define OMAP4_PPA_L2_POR_INDEX 0x23
 #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX 0x25

4 changes: 2 additions & 2 deletions arch/arm/mach-omap2/pmic-cpcap.c
@@ -246,10 +246,10 @@ int __init omap4_cpcap_init(void)
         omap_voltage_register_pmic(voltdm, &omap443x_max8952_mpu);

         if (of_machine_is_compatible("motorola,droid-bionic")) {
-                voltdm = voltdm_lookup("mpu");
+                voltdm = voltdm_lookup("core");
                 omap_voltage_register_pmic(voltdm, &omap_cpcap_core);

-                voltdm = voltdm_lookup("mpu");
+                voltdm = voltdm_lookup("iva");
                 omap_voltage_register_pmic(voltdm, &omap_cpcap_iva);
         } else {
                 voltdm = voltdm_lookup("core");

2 changes: 1 addition & 1 deletion arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -124,7 +124,7 @@
 #define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0

2 changes: 1 addition & 1 deletion arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
@@ -130,7 +130,7 @@
 #define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0
+#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0

8 changes: 1 addition & 7 deletions arch/ia64/include/asm/ptrace.h
@@ -54,8 +54,7 @@

 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
 {
-        /* FIXME: should this be bspstore + nr_dirty regs? */
-        return regs->ar_bspstore;
+        return regs->r12;
 }

 static inline int is_syscall_success(struct pt_regs *regs)
@@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs)
         unsigned long __ip = instruction_pointer(regs); \
         (__ip & ~3UL) + ((__ip & 3UL) << 2); \
 })
-/*
- * Why not default? Because user_stack_pointer() on ia64 gives register
- * stack backing store instead...
- */
-#define current_user_stack_pointer() (current_pt_regs()->r12)

 /* given a pointer to a task_struct, return the user's pt_regs */
 # define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
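
Dropping the ia64-private current_user_stack_pointer() is safe because, once user_stack_pointer() returns r12, the generic fallback yields the same value. Assuming the usual definition in include/linux/ptrace.h, the fallback reads roughly:

    /* Generic fallback (paraphrased); with the change above it also yields r12. */
    #ifndef current_user_stack_pointer
    #define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
    #endif
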
2 changes: 1 addition & 1 deletion arch/nds32/mm/cacheflush.c
@@ -238,7 +238,7 @@ void flush_dcache_page(struct page *page)
 {
         struct address_space *mapping;

-        mapping = page_mapping(page);
+        mapping = page_mapping_file(page);
         if (mapping && !mapping_mapped(mapping))
                 set_bit(PG_dcache_dirty, &page->flags);
         else {
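
The switch matters for pages sitting in the swap cache: page_mapping() would hand back the swap address_space, which the aliasing logic above must not treat as a file mapping. A hedged sketch of the helper's behaviour (the real implementation lives in core MM code; example_page_mapping_file() is only an illustrative name):

    /* Rough behavioural sketch of page_mapping_file(), for illustration only. */
    static inline struct address_space *example_page_mapping_file(struct page *page)
    {
            if (unlikely(PageSwapCache(page)))
                    return NULL;            /* never return the swap address_space */

            return page_mapping(page);
    }
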
2 changes: 1 addition & 1 deletion arch/parisc/include/asm/cmpxchg.h
@@ -72,7 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 #endif
         case 4: return __cmpxchg_u32((unsigned int *)ptr,
                                      (unsigned int)old, (unsigned int)new_);
-        case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
+        case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
         }
         __cmpxchg_called_with_bad_pointer();
         return old;

6 changes: 4 additions & 2 deletions arch/s390/kernel/cpcmd.c
@@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen)

 static int diag8_response(int cmdlen, char *response, int *rlen)
 {
+        unsigned long _cmdlen = cmdlen | 0x40000000L;
+        unsigned long _rlen = *rlen;
         register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
         register unsigned long reg3 asm ("3") = (addr_t) response;
-        register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
-        register unsigned long reg5 asm ("5") = *rlen;
+        register unsigned long reg4 asm ("4") = _cmdlen;
+        register unsigned long reg5 asm ("5") = _rlen;

         asm volatile(
                 " diag %2,%0,0x8\n"

2 changes: 1 addition & 1 deletion arch/x86/include/asm/smp.h
@@ -132,7 +132,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
-bool wakeup_cpu0(void);
+void cond_wakeup_cpu0(void);

 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);

26 changes: 12 additions & 14 deletions arch/x86/kernel/smpboot.c
@@ -1655,13 +1655,17 @@ void play_dead_common(void)
         local_irq_disable();
 }

-bool wakeup_cpu0(void)
+/**
+ * cond_wakeup_cpu0 - Wake up CPU0 if needed.
+ *
+ * If NMI wants to wake up CPU0, start CPU0.
+ */
+void cond_wakeup_cpu0(void)
 {
         if (smp_processor_id() == 0 && enable_start_cpu0)
-                return true;
-
-        return false;
+                start_cpu0();
 }
+EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);

 /*
  * We need to flush the caches before going to sleep, lest we have
@@ -1730,11 +1734,8 @@
                 __monitor(mwait_ptr, 0, 0);
                 mb();
                 __mwait(eax, 0);
-                /*
-                 * If NMI wants to wake up CPU0, start CPU0.
-                 */
-                if (wakeup_cpu0())
-                        start_cpu0();
+
+                cond_wakeup_cpu0();
         }
 }

@@ -1745,11 +1746,8 @@

         while (1) {
                 native_halt();
-                /*
-                 * If NMI wants to wake up CPU0, start CPU0.
-                 */
-                if (wakeup_cpu0())
-                        start_cpu0();
+
+                cond_wakeup_cpu0();
         }
 }

13 changes: 7 additions & 6 deletions arch/x86/kvm/mmu/mmu.c
@@ -5972,6 +5972,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
         struct kvm_mmu_page *sp;
         unsigned int ratio;
         LIST_HEAD(invalid_list);
+        bool flush = false;
         ulong to_zap;

         rcu_idx = srcu_read_lock(&kvm->srcu);
@@ -5992,20 +5993,20 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
                                       struct kvm_mmu_page,
                                       lpage_disallowed_link);
                 WARN_ON_ONCE(!sp->lpage_disallowed);
-                if (sp->tdp_mmu_page)
-                        kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
-                                sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
-                else {
+                if (sp->tdp_mmu_page) {
+                        flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
+                } else {
                         kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
                         WARN_ON_ONCE(sp->lpage_disallowed);
                 }

                 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-                        kvm_mmu_commit_zap_page(kvm, &invalid_list);
+                        kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
                         cond_resched_lock(&kvm->mmu_lock);
+                        flush = false;
                 }
         }
-        kvm_mmu_commit_zap_page(kvm, &invalid_list);
+        kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);

         spin_unlock(&kvm->mmu_lock);
         srcu_read_unlock(&kvm->srcu, rcu_idx);
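
The flush flag is needed because TDP MMU pages zapped via kvm_tdp_mmu_zap_sp() never land on invalid_list, so committing that list alone would skip the remote TLB flush those zaps still owe. For reference, a hedged sketch of what the helper used above does (paraphrased; example_remote_flush_or_zap() mirrors the static kvm_mmu_remote_flush_or_zap() in mmu.c):

    /* Paraphrased sketch of kvm_mmu_remote_flush_or_zap(), illustration only. */
    static bool example_remote_flush_or_zap(struct kvm *kvm,
                                            struct list_head *invalid_list,
                                            bool remote_flush)
    {
            if (!remote_flush && list_empty(invalid_list))
                    return false;

            if (!list_empty(invalid_list))
                    kvm_mmu_commit_zap_page(kvm, invalid_list); /* flushes as part of the commit */
            else
                    kvm_flush_remote_tlbs(kvm);

            return true;
    }
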
30 changes: 7 additions & 23 deletions arch/x86/kvm/mmu/tdp_iter.c
@@ -22,21 +22,22 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)

 /*
  * Sets a TDP iterator to walk a pre-order traversal of the paging structure
- * rooted at root_pt, starting with the walk to translate goal_gfn.
+ * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
  */
 void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
-                    int min_level, gfn_t goal_gfn)
+                    int min_level, gfn_t next_last_level_gfn)
 {
         WARN_ON(root_level < 1);
         WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);

-        iter->goal_gfn = goal_gfn;
+        iter->next_last_level_gfn = next_last_level_gfn;
+        iter->yielded_gfn = iter->next_last_level_gfn;
         iter->root_level = root_level;
         iter->min_level = min_level;
         iter->level = root_level;
         iter->pt_path[iter->level - 1] = root_pt;

-        iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level);
+        iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
         tdp_iter_refresh_sptep(iter);

         iter->valid = true;
@@ -82,7 +83,7 @@ static bool try_step_down(struct tdp_iter *iter)

         iter->level--;
         iter->pt_path[iter->level - 1] = child_pt;
-        iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level);
+        iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
         tdp_iter_refresh_sptep(iter);

         return true;
@@ -106,7 +107,7 @@ static bool try_step_side(struct tdp_iter *iter)
                 return false;

         iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);
-        iter->goal_gfn = iter->gfn;
+        iter->next_last_level_gfn = iter->gfn;
         iter->sptep++;
         iter->old_spte = READ_ONCE(*iter->sptep);

@@ -158,23 +159,6 @@ void tdp_iter_next(struct tdp_iter *iter)
         iter->valid = false;
 }

-/*
- * Restart the walk over the paging structure from the root, starting from the
- * highest gfn the iterator had previously reached. Assumes that the entire
- * paging structure, except the root page, may have been completely torn down
- * and rebuilt.
- */
-void tdp_iter_refresh_walk(struct tdp_iter *iter)
-{
-        gfn_t goal_gfn = iter->goal_gfn;
-
-        if (iter->gfn > goal_gfn)
-                goal_gfn = iter->gfn;
-
-        tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
-                       iter->root_level, iter->min_level, goal_gfn);
-}
-
 u64 *tdp_iter_root_pt(struct tdp_iter *iter)
 {
         return iter->pt_path[iter->root_level - 1];

11 changes: 8 additions & 3 deletions arch/x86/kvm/mmu/tdp_iter.h
@@ -15,7 +15,13 @@ struct tdp_iter {
          * The iterator will traverse the paging structure towards the mapping
          * for this GFN.
          */
-        gfn_t goal_gfn;
+        gfn_t next_last_level_gfn;
+        /*
+         * The next_last_level_gfn at the time when the thread last
+         * yielded. Only yielding when the next_last_level_gfn !=
+         * yielded_gfn helps ensure forward progress.
+         */
+        gfn_t yielded_gfn;
         /* Pointers to the page tables traversed to reach the current SPTE */
         u64 *pt_path[PT64_ROOT_MAX_LEVEL];
         /* A pointer to the current SPTE */
@@ -52,9 +58,8 @@
 u64 *spte_to_child_pt(u64 pte, int level);

 void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
-                    int min_level, gfn_t goal_gfn);
+                    int min_level, gfn_t next_last_level_gfn);
 void tdp_iter_next(struct tdp_iter *iter);
-void tdp_iter_refresh_walk(struct tdp_iter *iter);
 u64 *tdp_iter_root_pt(struct tdp_iter *iter);

 #endif /* __KVM_X86_MMU_TDP_ITER_H */
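
The comment on yielded_gfn describes how callers are expected to use the pair of fields: only drop mmu_lock once the iterator has moved past the point of the previous yield, so repeated restarts cannot live-lock the walk. A hedged sketch of such a helper (the real one lives in tdp_mmu.c; example_iter_cond_resched() is illustrative only):

    /* Illustrative cond-resched helper built on next_last_level_gfn/yielded_gfn. */
    static inline bool example_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
    {
            /* No forward progress since the last yield: do not yield again. */
            if (iter->next_last_level_gfn == iter->yielded_gfn)
                    return false;

            if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                    cond_resched_lock(&kvm->mmu_lock);
                    /* Restart the walk; tdp_iter_start() records the new yielded_gfn. */
                    tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
                                   iter->root_level, iter->min_level,
                                   iter->next_last_level_gfn);
                    return true;
            }

            return false;
    }
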
