Skip to content

Commit

Permalink
Merge tag 'v5.10.98' into 5.10
Browse files Browse the repository at this point in the history
This is the 5.10.98 stable release
  • Loading branch information
xanmod committed Feb 5, 2022
2 parents 7d89661 + 12a0a56 commit 01105e0
Show file tree
Hide file tree
Showing 30 changed files with 163 additions and 146 deletions.
3 changes: 2 additions & 1 deletion Documentation/accounting/psi.rst
Expand Up @@ -92,7 +92,8 @@ Triggers can be set on more than one psi metric and more than one trigger
for the same psi metric can be specified. However for each trigger a separate
file descriptor is required to be able to poll it separately from others,
therefore for each trigger a separate open() syscall should be made even
when opening the same psi interface file.
when opening the same psi interface file. Write operations to a file descriptor
with an already existing psi trigger will fail with EBUSY.

Monitors activate only when the system enters a stall state for the monitored
psi metric and deactivate upon exit from the stall state. While the system is
Expand Down
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 96
SUBLEVEL = 98
EXTRAVERSION =
NAME = Dare mighty things

Expand Down
1 change: 1 addition & 0 deletions arch/x86/include/asm/kvm_host.h
Expand Up @@ -1285,6 +1285,7 @@ struct kvm_x86_ops {
};

struct kvm_x86_nested_ops {
void (*leave_nested)(struct kvm_vcpu *vcpu);
int (*check_events)(struct kvm_vcpu *vcpu);
bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
int (*get_state)(struct kvm_vcpu *vcpu,
Expand Down
2 changes: 2 additions & 0 deletions arch/x86/kernel/cpu/mce/intel.c
Expand Up @@ -486,6 +486,8 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
case INTEL_FAM6_BROADWELL_X:
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_ICELAKE_X:
case INTEL_FAM6_ICELAKE_D:
case INTEL_FAM6_SAPPHIRERAPIDS_X:
case INTEL_FAM6_XEON_PHI_KNL:
case INTEL_FAM6_XEON_PHI_KNM:

Expand Down
10 changes: 8 additions & 2 deletions arch/x86/kvm/svm/nested.c
Expand Up @@ -783,8 +783,10 @@ void svm_free_nested(struct vcpu_svm *svm)
/*
* Forcibly leave nested mode in order to be able to reset the VCPU later on.
*/
void svm_leave_nested(struct vcpu_svm *svm)
void svm_leave_nested(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

if (is_guest_mode(&svm->vcpu)) {
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
Expand Down Expand Up @@ -1185,7 +1187,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;

if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
svm_leave_nested(svm);
svm_leave_nested(vcpu);
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
return 0;
}
Expand Down Expand Up @@ -1238,6 +1240,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
hsave->save = *save;

if (is_guest_mode(vcpu))
svm_leave_nested(vcpu);

svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
load_nested_vmcb_control(svm, ctl);
nested_prepare_vmcb_control(svm);
Expand All @@ -1252,6 +1257,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
}

struct kvm_x86_nested_ops svm_nested_ops = {
.leave_nested = svm_leave_nested,
.check_events = svm_check_nested_events,
.get_nested_state_pages = svm_get_nested_state_pages,
.get_state = svm_get_nested_state,
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/kvm/svm/svm.c
Expand Up @@ -279,7 +279,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)

if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
if (!(efer & EFER_SVME)) {
svm_leave_nested(svm);
svm_leave_nested(vcpu);
svm_set_gif(svm, true);

/*
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/kvm/svm/svm.h
Expand Up @@ -393,7 +393,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
struct vmcb *nested_vmcb);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct vcpu_svm *svm);
Expand Down
1 change: 1 addition & 0 deletions arch/x86/kvm/vmx/nested.c
Expand Up @@ -6628,6 +6628,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
}

struct kvm_x86_nested_ops vmx_nested_ops = {
.leave_nested = vmx_leave_nested,
.check_events = vmx_check_nested_events,
.hv_timer_pending = nested_vmx_preemption_timer_pending,
.get_state = vmx_get_nested_state,
Expand Down
2 changes: 2 additions & 0 deletions arch/x86/kvm/x86.c
Expand Up @@ -4391,6 +4391,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.hflags |= HF_SMM_MASK;
else
vcpu->arch.hflags &= ~HF_SMM_MASK;

kvm_x86_ops.nested_ops->leave_nested(vcpu);
kvm_smm_changed(vcpu);
}

Expand Down
39 changes: 1 addition & 38 deletions drivers/bus/simple-pm-bus.c
Expand Up @@ -16,33 +16,7 @@

static int simple_pm_bus_probe(struct platform_device *pdev)
{
const struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct of_device_id *match;

/*
* Allow user to use driver_override to bind this driver to a
* transparent bus device which has a different compatible string
* that's not listed in simple_pm_bus_of_match. We don't want to do any
* of the simple-pm-bus tasks for these devices, so return early.
*/
if (pdev->driver_override)
return 0;

match = of_match_device(dev->driver->of_match_table, dev);
/*
* These are transparent bus devices (not simple-pm-bus matches) that
* have their child nodes populated automatically. So, don't need to
* do anything more. We only match with the device if this driver is
* the most specific match because we don't want to incorrectly bind to
* a device that has a more specific driver.
*/
if (match && match->data) {
if (of_property_match_string(np, "compatible", match->compatible) == 0)
return 0;
else
return -ENODEV;
}
struct device_node *np = pdev->dev.of_node;

dev_dbg(&pdev->dev, "%s\n", __func__);

Expand All @@ -56,25 +30,14 @@ static int simple_pm_bus_probe(struct platform_device *pdev)

static int simple_pm_bus_remove(struct platform_device *pdev)
{
const void *data = of_device_get_match_data(&pdev->dev);

if (pdev->driver_override || data)
return 0;

dev_dbg(&pdev->dev, "%s\n", __func__);

pm_runtime_disable(&pdev->dev);
return 0;
}

#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */

static const struct of_device_id simple_pm_bus_of_match[] = {
{ .compatible = "simple-pm-bus", },
{ .compatible = "simple-bus", .data = ONLY_BUS },
{ .compatible = "simple-mfd", .data = ONLY_BUS },
{ .compatible = "isa", .data = ONLY_BUS },
{ .compatible = "arm,amba-bus", .data = ONLY_BUS },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
Expand Down
10 changes: 1 addition & 9 deletions drivers/gpu/drm/vc4/vc4_hdmi.c
Expand Up @@ -1399,14 +1399,8 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
/* clock period in microseconds */
const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
u32 val;
int ret;

ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret)
return ret;
u32 val = HDMI_READ(HDMI_CEC_CNTRL_5);

val = HDMI_READ(HDMI_CEC_CNTRL_5);
val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
VC4_HDMI_CEC_CNT_TO_4500_US_MASK);
Expand Down Expand Up @@ -1531,8 +1525,6 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
if (ret < 0)
goto err_delete_cec_adap;

pm_runtime_put(&vc4_hdmi->pdev->dev);

return 0;

err_delete_cec_adap:
Expand Down
14 changes: 13 additions & 1 deletion drivers/net/ethernet/amd/xgbe/xgbe-drv.c
Expand Up @@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
if (!channel->tx_ring)
break;

/* Deactivate the Tx timer */
del_timer_sync(&channel->tx_timer);
channel->tx_timer_active = 0;
}
}

Expand Down Expand Up @@ -2557,6 +2559,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
len += buf2_len;

if (buf2_len > rdata->rx.buf.dma_len) {
/* Hardware inconsistency within the descriptors
* that has resulted in a length underflow.
*/
error = 1;
goto skip_data;
}

if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
buf1_len);
Expand Down Expand Up @@ -2586,8 +2596,10 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!last || context_next)
goto read_again;

if (!skb)
if (!skb || error) {
dev_kfree_skb(skb);
goto next_packet;
}

/* Be sure we don't exceed the configured MTU */
max_len = netdev->mtu + ETH_HLEN;
Expand Down
32 changes: 14 additions & 18 deletions drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
Expand Up @@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,

static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *priv;

/* A given netdev is not a representor or not a slave of LAG configuration */
if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
return false;

priv = netdev_priv(netdev);
rpriv = priv->ppriv;

/* Egress acl forward to vport is supported only non-uplink representor */
return rpriv->rep->vport != MLX5_VPORT_UPLINK;
return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev);
}

static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
Expand All @@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
u16 fwd_vport_num;
int err;

if (!mlx5e_rep_is_lag_netdev(netdev))
return;

info = ptr;
lag_info = info->lower_state_info;
/* This is not an event of a representor becoming active slave */
Expand Down Expand Up @@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
struct net_device *lag_dev;
struct mlx5e_priv *priv;

if (!mlx5e_rep_is_lag_netdev(netdev))
return;

priv = netdev_priv(netdev);
rpriv = priv->ppriv;
lag_dev = info->upper_dev;
Expand All @@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct mlx5e_rep_priv *rpriv;
struct mlx5e_rep_bond *bond;
struct mlx5e_priv *priv;

if (!mlx5e_rep_is_lag_netdev(netdev))
return NOTIFY_DONE;

bond = container_of(nb, struct mlx5e_rep_bond, nb);
priv = netdev_priv(netdev);
rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH);
/* Verify VF representor is on the same device of the bond handling the netevent. */
if (rpriv->uplink_priv.bond != bond)
return NOTIFY_DONE;

switch (event) {
case NETDEV_CHANGELOWERSTATE:
Expand Down
2 changes: 1 addition & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
Expand Up @@ -131,7 +131,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

del_timer(&fw_reset->timer);
del_timer_sync(&fw_reset->timer);
}

static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
Expand Down
2 changes: 1 addition & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
Expand Up @@ -292,7 +292,7 @@ static int
create_chain_restore(struct fs_chain *chain)
{
struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_fs_chains *chains = chain->chains;
enum mlx5e_tc_attr_to_reg chain_to_reg;
struct mlx5_modify_hdr *mod_hdr;
Expand Down
25 changes: 17 additions & 8 deletions drivers/net/ipa/ipa_endpoint.c
Expand Up @@ -901,27 +901,35 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
struct gsi *gsi;
u32 backlog;

if (!endpoint->replenish_enabled) {
if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
if (count)
atomic_add(count, &endpoint->replenish_saved);
return;
}

/* If already active, just update the backlog */
if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
if (count)
atomic_add(count, &endpoint->replenish_backlog);
return;
}

while (atomic_dec_not_zero(&endpoint->replenish_backlog))
if (ipa_endpoint_replenish_one(endpoint))
goto try_again_later;

clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

if (count)
atomic_add(count, &endpoint->replenish_backlog);

return;

try_again_later:
/* The last one didn't succeed, so fix the backlog */
backlog = atomic_inc_return(&endpoint->replenish_backlog);
clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

if (count)
atomic_add(count, &endpoint->replenish_backlog);
/* The last one didn't succeed, so fix the backlog */
backlog = atomic_add_return(count + 1, &endpoint->replenish_backlog);

/* Whenever a receive buffer transaction completes we'll try to
* replenish again. It's unlikely, but if we fail to supply even
Expand All @@ -941,7 +949,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
u32 max_backlog;
u32 saved;

endpoint->replenish_enabled = true;
set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
atomic_add(saved, &endpoint->replenish_backlog);

Expand All @@ -955,7 +963,7 @@ static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
u32 backlog;

endpoint->replenish_enabled = false;
clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
atomic_add(backlog, &endpoint->replenish_saved);
}
Expand Down Expand Up @@ -1472,7 +1480,8 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
/* RX transactions require a single TRE, so the maximum
* backlog is the same as the maximum outstanding TREs.
*/
endpoint->replenish_enabled = false;
clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
atomic_set(&endpoint->replenish_saved,
gsi_channel_tre_max(gsi, endpoint->channel_id));
atomic_set(&endpoint->replenish_backlog, 0);
Expand Down

0 comments on commit 01105e0

Please sign in to comment.