Commit

Merge branch '5.15' into 5.15-tt

xanmod committed Feb 5, 2022
2 parents 652c890 + 5b4ae41 commit b50cbba
Showing 30 changed files with 228 additions and 91 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 19
SUBLEVEL = 21
EXTRAVERSION =
NAME = Trick or Treat

10 changes: 1 addition & 9 deletions drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1735,14 +1735,8 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
/* clock period in microseconds */
const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
u32 val;
int ret;

ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret)
return ret;
u32 val = HDMI_READ(HDMI_CEC_CNTRL_5);

val = HDMI_READ(HDMI_CEC_CNTRL_5);
val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
VC4_HDMI_CEC_CNT_TO_4500_US_MASK);
@@ -1888,8 +1882,6 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
if (ret < 0)
goto err_remove_handlers;

pm_runtime_put(&vc4_hdmi->pdev->dev);

return 0;

err_remove_handlers:
14 changes: 13 additions & 1 deletion drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
if (!channel->tx_ring)
break;

/* Deactivate the Tx timer */
del_timer_sync(&channel->tx_timer);
channel->tx_timer_active = 0;
}
}

@@ -2555,6 +2557,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
len += buf2_len;

if (buf2_len > rdata->rx.buf.dma_len) {
/* Hardware inconsistency within the descriptors
* that has resulted in a length underflow.
*/
error = 1;
goto skip_data;
}

if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
buf1_len);
@@ -2584,8 +2594,10 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!last || context_next)
goto read_again;

if (!skb)
if (!skb || error) {
dev_kfree_skb(skb);
goto next_packet;
}

/* Be sure we don't exceed the configured MTU */
max_len = netdev->mtu + ETH_HLEN;
6 changes: 4 additions & 2 deletions drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6346,7 +6346,8 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
u32 mac_data;
u16 phy_data;

if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
hw->mac.type >= e1000_pch_adp) {
/* Request ME configure the device for S0ix */
mac_data = er32(H2ME);
mac_data |= E1000_H2ME_START_DPG;
@@ -6495,7 +6496,8 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
u16 phy_data;
u32 i = 0;

if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
hw->mac.type >= e1000_pch_adp) {
/* Request ME unconfigure the device from S0ix */
mac_data = er32(H2ME);
mac_data &= ~E1000_H2ME_START_DPG;
1 change: 1 addition & 0 deletions drivers/net/ethernet/intel/i40e/i40e.h
@@ -144,6 +144,7 @@ enum i40e_state_t {
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
__I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
__I40E_IN_REMOVE,
__I40E_VFS_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
31 changes: 29 additions & 2 deletions drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5372,14 +5372,24 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
/* There is no need to reset BW when mqprio mode is on. */
if (pf->flags & I40E_FLAG_TC_MQPRIO)
return 0;
if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {

if (!vsi->mqprio_qopt.qopt.hw) {
if (pf->flags & I40E_FLAG_DCB_ENABLED)
goto skip_reset;

if (IS_ENABLED(CONFIG_I40E_DCB) &&
i40e_dcb_hw_get_num_tc(&pf->hw) == 1)
goto skip_reset;

ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
if (ret)
dev_info(&pf->pdev->dev,
"Failed to reset tx rate for vsi->seid %u\n",
vsi->seid);
return ret;
}

skip_reset:
memset(&bw_data, 0, sizeof(bw_data));
bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
@@ -10853,6 +10863,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
bool lock_acquired)
{
int ret;

if (test_bit(__I40E_IN_REMOVE, pf->state))
return;
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@@ -12212,6 +12225,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)

vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);
if (test_bit(__I40E_IN_REMOVE, pf->state))
return pf->alloc_rss_size;

pf->alloc_rss_size = new_rss_size;

@@ -13038,6 +13053,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
if (need_reset)
i40e_prep_for_reset(pf);

/* VSI shall be deleted in a moment, just return EINVAL */
if (test_bit(__I40E_IN_REMOVE, pf->state))
return -EINVAL;

old_prog = xchg(&vsi->xdp_prog, prog);

if (need_reset) {
@@ -15928,8 +15947,13 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
/* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
* flags, once they are set, i40e_rebuild should not be called as
* i40e_prep_for_reset always returns early.
*/
while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
usleep_range(1000, 2000);
set_bit(__I40E_IN_REMOVE, pf->state);

if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
@@ -16128,6 +16152,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);

if (test_bit(__I40E_IN_REMOVE, pf->state))
return;

i40e_reset_and_rebuild(pf, false, false);
}

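The i40e hunks above all hinge on one pattern: the remove path atomically claims the reset-pending bit, sets __I40E_IN_REMOVE, and every later reset/rebuild entry point returns early. A minimal standalone sketch of that pattern, using C11 atomics as a stand-in for the kernel's test_and_set_bit()/test_bit(); the bit values and helper names here are illustrative, not the driver's:

/* Illustrative sketch, not part of the commit; bit values are hypothetical. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BIT_RESET_PENDING (1u << 0)
#define BIT_IN_REMOVE     (1u << 1)

static atomic_uint state;

static bool test_and_set(unsigned int bit)
{
	return atomic_fetch_or(&state, bit) & bit;  /* returns previous bit value */
}

static void reset_and_rebuild(void)
{
	if (atomic_load(&state) & BIT_IN_REMOVE)
		return;                 /* device is being removed: skip the rebuild */
	puts("rebuild ran");
}

static void driver_remove(void)
{
	while (test_and_set(BIT_RESET_PENDING))
		;                       /* wait out any in-flight reset */
	atomic_fetch_or(&state, BIT_IN_REMOVE);
	/* from here on, reset_and_rebuild() and friends return early */
}

int main(void)
{
	driver_remove();
	reset_and_rebuild();            /* prints nothing */
	return 0;
}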
3 changes: 2 additions & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -553,7 +553,8 @@ static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,

static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
{
*max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
/* Hardware treats 0 as "unlimited", set at least 1. */
*max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);

qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
ceil, *max_average_bw);
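The clamp above matters because the hardware reads a max_average_bw of 0 as "no ceiling". A minimal standalone sketch of the arithmetic, assuming BYTES_IN_MBIT works out to 125000 bytes/s per Mbit; the helper below is illustrative, not the driver's code:

/* Illustrative sketch, not part of the commit. */
#include <stdint.h>
#include <stdio.h>

#define BYTES_IN_MBIT 125000u   /* assumed value: bytes/s in 1 Mbit/s */

static uint32_t convert_ceil(uint64_t ceil_bytes_per_sec)
{
	uint32_t bw = (uint32_t)(ceil_bytes_per_sec / BYTES_IN_MBIT);

	/* 0 would mean "unlimited" to the hardware, so clamp to 1 Mbit/s */
	return bw < 1 ? 1 : bw;
}

int main(void)
{
	printf("%u\n", convert_ceil(62500));    /* 500 Kbit/s -> 1, not 0 */
	printf("%u\n", convert_ceil(1250000));  /* 10 Mbit/s  -> 10 */
	return 0;
}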
32 changes: 14 additions & 18 deletions drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,

static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *priv;

/* A given netdev is not a representor or not a slave of LAG configuration */
if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
return false;

priv = netdev_priv(netdev);
rpriv = priv->ppriv;

/* Egress acl forward to vport is supported only non-uplink representor */
return rpriv->rep->vport != MLX5_VPORT_UPLINK;
return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev);
}

static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
@@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
u16 fwd_vport_num;
int err;

if (!mlx5e_rep_is_lag_netdev(netdev))
return;

info = ptr;
lag_info = info->lower_state_info;
/* This is not an event of a representor becoming active slave */
@@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
struct net_device *lag_dev;
struct mlx5e_priv *priv;

if (!mlx5e_rep_is_lag_netdev(netdev))
return;

priv = netdev_priv(netdev);
rpriv = priv->ppriv;
lag_dev = info->upper_dev;
@@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct mlx5e_rep_priv *rpriv;
struct mlx5e_rep_bond *bond;
struct mlx5e_priv *priv;

if (!mlx5e_rep_is_lag_netdev(netdev))
return NOTIFY_DONE;

bond = container_of(nb, struct mlx5e_rep_bond, nb);
priv = netdev_priv(netdev);
rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH);
/* Verify VF representor is on the same device of the bond handling the netevent. */
if (rpriv->uplink_priv.bond != bond)
return NOTIFY_DONE;

switch (event) {
case NETDEV_CHANGELOWERSTATE:
Expand Down
6 changes: 4 additions & 2 deletions drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -491,7 +491,7 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
}

br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
err = register_netdevice_notifier(&br_offloads->netdev_nb);
err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
if (err) {
esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
err);
@@ -509,7 +509,9 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
err_register_swdev:
destroy_workqueue(br_offloads->wq);
err_alloc_wq:
rtnl_lock();
mlx5_esw_bridge_cleanup(esw);
rtnl_unlock();
}

void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
@@ -524,7 +526,7 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
return;

cancel_delayed_work_sync(&br_offloads->update_work);
unregister_netdevice_notifier(&br_offloads->netdev_nb);
unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
unregister_switchdev_notifier(&br_offloads->nb);
destroy_workqueue(br_offloads->wq);
13 changes: 11 additions & 2 deletions drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -157,11 +157,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
/* Tunnel mode */
if (mode == XFRM_MODE_TUNNEL) {
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
if (xo->proto == IPPROTO_IPV6)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)

switch (xo->inner_ipproto) {
case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
fallthrough;
case IPPROTO_TCP:
/* IP | ESP | IP | [TCP | UDP] */
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
break;
default:
break;
}
return;
}

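The switch above relies on a deliberate fallthrough: an inner UDP header sets the inner-L4-UDP flag and then shares the inner L4 offset assignment with TCP, while any other inner protocol leaves both untouched. A small standalone sketch of that control flow, with made-up flag and protocol constants:

/* Illustrative sketch, not part of the commit; constants are made up. */
#include <stdio.h>

#define FLAG_INNER_L4_UDP 0x1   /* hypothetical stand-in for MLX5_ETH_WQE_SWP_INNER_L4_UDP */

enum { PROTO_TCP = 6, PROTO_UDP = 17 };

static void set_inner_l4(int inner_ipproto, unsigned int *swp_flags,
			 int *inner_l4_offset, int transport_offset)
{
	switch (inner_ipproto) {
	case PROTO_UDP:
		*swp_flags |= FLAG_INNER_L4_UDP;
		/* fall through: UDP also needs the inner L4 offset */
	case PROTO_TCP:
		*inner_l4_offset = transport_offset / 2;  /* offsets counted in 2-byte words */
		break;
	default:
		break;  /* anything else: set neither flag nor offset */
	}
}

int main(void)
{
	unsigned int flags = 0;
	int off = 0;

	set_inner_l4(PROTO_UDP, &flags, &off, 64);
	printf("flags=%#x inner_l4_offset=%d\n", flags, off);  /* flags=0x1 inner_l4_offset=32 */
	return 0;
}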
4 changes: 4 additions & 0 deletions drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1385,6 +1385,8 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_offloads *br_offloads;

ASSERT_RTNL();

br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
if (!br_offloads)
return ERR_PTR(-ENOMEM);
@@ -1401,6 +1403,8 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;

ASSERT_RTNL();

if (!br_offloads)
return;

@@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template,
__field(unsigned int, used)
),
TP_fast_assign(
strncpy(__entry->dev_name,
strscpy(__entry->dev_name,
netdev_name(fdb->dev),
IFNAMSIZ);
memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
2 changes: 1 addition & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -131,7 +131,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

del_timer(&fw_reset->timer);
del_timer_sync(&fw_reset->timer);
}

static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
9 changes: 5 additions & 4 deletions drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -121,12 +121,13 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)

u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
if (!mlx5_chains_prios_supported(chains))
return 1;

if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;

if (!chains->dev->priv.eswitch ||
chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
return 1;

/* We should get here only for eswitch case */
return FDB_TC_MAX_PRIO;
}
@@ -211,7 +212,7 @@ static int
create_chain_restore(struct fs_chain *chain)
{
struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_fs_chains *chains = chain->chains;
enum mlx5e_tc_attr_to_reg chain_to_reg;
struct mlx5_modify_hdr *mod_hdr;
9 changes: 5 additions & 4 deletions drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -406,23 +406,24 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,

switch (module_id) {
case MLX5_MODULE_ID_SFP:
mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
case MLX5_MODULE_ID_QSFP:
case MLX5_MODULE_ID_QSFP_PLUS:
case MLX5_MODULE_ID_QSFP28:
mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
default:
mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
return -EINVAL;
}

if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH)
if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross pages read, read until offset 256 in low page */
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
size = MLX5_EEPROM_PAGE_LENGTH - offset;

query.size = size;
query.offset = offset;

return mlx5_query_mcia(dev, &query, data);
}
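The port.c hunk above clamps a module EEPROM read so it never crosses the 256-byte low page, using the locally computed offset rather than the not-yet-populated query.offset. A standalone sketch of the clamp arithmetic, assuming a 256-byte page as in MLX5_EEPROM_PAGE_LENGTH; names and values are illustrative:

/* Illustrative sketch, not part of the commit. */
#include <stdio.h>

#define EEPROM_PAGE_LENGTH 256   /* assumed low-page size */

static int clamp_read_size(int offset, int size)
{
	/* A read that would cross the page boundary stops at offset 256. */
	if (offset + size > EEPROM_PAGE_LENGTH)
		size = EEPROM_PAGE_LENGTH - offset;
	return size;
}

int main(void)
{
	printf("%d\n", clamp_read_size(200, 100));  /* -> 56  */
	printf("%d\n", clamp_read_size(0, 128));    /* -> 128 */
	return 0;
}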
