Merge 5.15.20 into android13-5.15
Changes in 5.15.20
	PCI: pciehp: Fix infinite loop in IRQ handler upon power fault
	selftests: mptcp: fix ipv6 routing setup
	net: ipa: use a bitmap for endpoint replenish_enabled
	net: ipa: prevent concurrent replenish
	drm/vc4: hdmi: Make sure the device is powered with CEC
	cgroup-v1: Require capabilities to set release_agent
	Revert "mm/gup: small refactoring: simplify try_grab_page()"
	ovl: don't fail copy up if no fileattr support on upper
	lockd: fix server crash on reboot of client holding lock
	lockd: fix failure to cleanup client locks
	net/mlx5e: IPsec: Fix tunnel mode crypto offload for non TCP/UDP traffic
	net/mlx5: Bridge, take rtnl lock in init error handler
	net/mlx5: Bridge, ensure dev_name is null-terminated
	net/mlx5e: Fix handling of wrong devices during bond netevent
	net/mlx5: Use del_timer_sync in fw reset flow of halting poll
	net/mlx5e: Fix module EEPROM query
	net/mlx5: Fix offloading with ESWITCH_IPV4_TTL_MODIFY_ENABLE
	net/mlx5e: Don't treat small ceil values as unlimited in HTB offload
	net/mlx5: Bridge, Fix devlink deadlock on net namespace deletion
	net/mlx5: E-Switch, Fix uninitialized variable modact
	ipheth: fix EOVERFLOW in ipheth_rcvbulk_callback
	i40e: Fix reset bw limit when DCB enabled with 1 TC
	i40e: Fix reset path while removing the driver
	net: amd-xgbe: ensure to reset the tx_timer_active flag
	net: amd-xgbe: Fix skb data length underflow
	fanotify: Fix stale file descriptor in copy_event_to_user()
	net: sched: fix use-after-free in tc_new_tfilter()
	rtnetlink: make sure to refresh master_dev/m_ops in __rtnl_newlink()
	cpuset: Fix the bug that subpart_cpus updated wrongly in update_cpumask()
	e1000e: Handshake with CSME starts from ADL platforms
	af_packet: fix data-race in packet_setsockopt / packet_setsockopt
	tcp: add missing tcp_skb_can_collapse() test in tcp_shift_skb_data()
	ovl: fix NULL pointer dereference in copy up warning
	Linux 5.15.20

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ia50333eff81881fac62eb52455b502e6c46ff3d9
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 19
+SUBLEVEL = 20
 EXTRAVERSION =
 NAME = Trick or Treat
 
@@ -1738,18 +1738,18 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
 	u32 val;
 	int ret;
 
-	ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
-	if (ret)
-		return ret;
-
-	val = HDMI_READ(HDMI_CEC_CNTRL_5);
-	val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
-		 VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
-		 VC4_HDMI_CEC_CNT_TO_4500_US_MASK);
-	val |= ((4700 / usecs) << VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT) |
-	       ((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT);
-
 	if (enable) {
+		ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+		if (ret)
+			return ret;
+
+		val = HDMI_READ(HDMI_CEC_CNTRL_5);
+		val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
+			 VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
+			 VC4_HDMI_CEC_CNT_TO_4500_US_MASK);
+		val |= ((4700 / usecs) << VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT) |
+		       ((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT);
+
 		HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
 			   VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
 		HDMI_WRITE(HDMI_CEC_CNTRL_5, val);
@@ -1777,7 +1777,10 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
 		HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
 		HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
 			   VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
+
+		pm_runtime_put(&vc4_hdmi->pdev->dev);
 	}
+
 	return 0;
 }
 
@@ -1888,8 +1891,6 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
 	if (ret < 0)
 		goto err_remove_handlers;
 
-	pm_runtime_put(&vc4_hdmi->pdev->dev);
-
 	return 0;
 
 err_remove_handlers:
@@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
 		if (!channel->tx_ring)
 			break;
 
+		/* Deactivate the Tx timer */
 		del_timer_sync(&channel->tx_timer);
+		channel->tx_timer_active = 0;
 	}
 }
 
@@ -2555,6 +2557,14 @@ read_again:
 			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
 			len += buf2_len;
 
+			if (buf2_len > rdata->rx.buf.dma_len) {
+				/* Hardware inconsistency within the descriptors
+				 * that has resulted in a length underflow.
+				 */
+				error = 1;
+				goto skip_data;
+			}
+
 			if (!skb) {
 				skb = xgbe_create_skb(pdata, napi, rdata,
 						      buf1_len);
@@ -2584,8 +2594,10 @@ skip_data:
 	if (!last || context_next)
 		goto read_again;
 
-	if (!skb)
+	if (!skb || error) {
+		dev_kfree_skb(skb);
 		goto next_packet;
+	}
 
 	/* Be sure we don't exceed the configured MTU */
 	max_len = netdev->mtu + ETH_HLEN;
@@ -6346,7 +6346,8 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
 	u32 mac_data;
 	u16 phy_data;
 
-	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+	    hw->mac.type >= e1000_pch_adp) {
 		/* Request ME configure the device for S0ix */
 		mac_data = er32(H2ME);
 		mac_data |= E1000_H2ME_START_DPG;
@@ -6495,7 +6496,8 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
 	u16 phy_data;
 	u32 i = 0;
 
-	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+	    hw->mac.type >= e1000_pch_adp) {
 		/* Request ME unconfigure the device from S0ix */
 		mac_data = er32(H2ME);
 		mac_data &= ~E1000_H2ME_START_DPG;
@@ -144,6 +144,7 @@ enum i40e_state_t {
 	__I40E_VIRTCHNL_OP_PENDING,
 	__I40E_RECOVERY_MODE,
 	__I40E_VF_RESETS_DISABLED,	/* disable resets during i40e_remove */
+	__I40E_IN_REMOVE,
 	__I40E_VFS_RELEASING,
 	/* This must be last as it determines the size of the BITMAP */
 	__I40E_STATE_SIZE__,
@@ -5372,7 +5372,15 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
 	/* There is no need to reset BW when mqprio mode is on. */
 	if (pf->flags & I40E_FLAG_TC_MQPRIO)
 		return 0;
-	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+	if (!vsi->mqprio_qopt.qopt.hw) {
+		if (pf->flags & I40E_FLAG_DCB_ENABLED)
+			goto skip_reset;
+
+		if (IS_ENABLED(CONFIG_I40E_DCB) &&
+		    i40e_dcb_hw_get_num_tc(&pf->hw) == 1)
+			goto skip_reset;
+
 		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
 		if (ret)
 			dev_info(&pf->pdev->dev,
@@ -5380,6 +5388,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
 				 vsi->seid);
 		return ret;
 	}
+
+skip_reset:
 	memset(&bw_data, 0, sizeof(bw_data));
 	bw_data.tc_valid_bits = enabled_tc;
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
@@ -10853,6 +10863,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
 		   bool lock_acquired)
 {
 	int ret;
+
+	if (test_bit(__I40E_IN_REMOVE, pf->state))
+		return;
 	/* Now we wait for GRST to settle out.
 	 * We don't have to delete the VEBs or VSIs from the hw switch
 	 * because the reset will make them disappear.
@@ -12212,6 +12225,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
 
 	vsi->req_queue_pairs = queue_count;
 	i40e_prep_for_reset(pf);
+	if (test_bit(__I40E_IN_REMOVE, pf->state))
+		return pf->alloc_rss_size;
 
 	pf->alloc_rss_size = new_rss_size;
 
@@ -13038,6 +13053,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
 	if (need_reset)
 		i40e_prep_for_reset(pf);
 
+	/* VSI shall be deleted in a moment, just return EINVAL */
+	if (test_bit(__I40E_IN_REMOVE, pf->state))
+		return -EINVAL;
+
 	old_prog = xchg(&vsi->xdp_prog, prog);
 
 	if (need_reset) {
@@ -15928,8 +15947,13 @@ static void i40e_remove(struct pci_dev *pdev)
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
 
-	while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+	/* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
+	 * flags, once they are set, i40e_rebuild should not be called as
+	 * i40e_prep_for_reset always returns early.
+	 */
+	while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
 		usleep_range(1000, 2000);
+	set_bit(__I40E_IN_REMOVE, pf->state);
 
 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
 		set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
@@ -16128,6 +16152,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
 {
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 
+	if (test_bit(__I40E_IN_REMOVE, pf->state))
+		return;
+
 	i40e_reset_and_rebuild(pf, false, false);
 }
 
@@ -553,7 +553,8 @@ static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
 
 static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
 {
-	*max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
+	/* Hardware treats 0 as "unlimited", set at least 1. */
+	*max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
 
 	qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
 		ceil, *max_average_bw);
@@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
 
 static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
 {
-	struct mlx5e_rep_priv *rpriv;
-	struct mlx5e_priv *priv;
-
-	/* A given netdev is not a representor or not a slave of LAG configuration */
-	if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
-		return false;
-
-	priv = netdev_priv(netdev);
-	rpriv = priv->ppriv;
-
-	/* Egress acl forward to vport is supported only non-uplink representor */
-	return rpriv->rep->vport != MLX5_VPORT_UPLINK;
+	return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev);
 }
 
 static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
@@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
 	u16 fwd_vport_num;
 	int err;
 
-	if (!mlx5e_rep_is_lag_netdev(netdev))
-		return;
-
 	info = ptr;
 	lag_info = info->lower_state_info;
 	/* This is not an event of a representor becoming active slave */
@@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
 	struct net_device *lag_dev;
 	struct mlx5e_priv *priv;
 
-	if (!mlx5e_rep_is_lag_netdev(netdev))
-		return;
-
 	priv = netdev_priv(netdev);
 	rpriv = priv->ppriv;
 	lag_dev = info->upper_dev;
@@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
 				       unsigned long event, void *ptr)
 {
 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+	struct mlx5e_rep_priv *rpriv;
+	struct mlx5e_rep_bond *bond;
+	struct mlx5e_priv *priv;
+
+	if (!mlx5e_rep_is_lag_netdev(netdev))
+		return NOTIFY_DONE;
+
+	bond = container_of(nb, struct mlx5e_rep_bond, nb);
+	priv = netdev_priv(netdev);
+	rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH);
+	/* Verify VF representor is on the same device of the bond handling the netevent. */
+	if (rpriv->uplink_priv.bond != bond)
+		return NOTIFY_DONE;
 
 	switch (event) {
 	case NETDEV_CHANGELOWERSTATE:
@@ -491,7 +491,7 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
 	}
 
 	br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
-	err = register_netdevice_notifier(&br_offloads->netdev_nb);
+	err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
 	if (err) {
 		esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
 			 err);
@@ -509,7 +509,9 @@ err_register_swdev_blk:
 err_register_swdev:
 	destroy_workqueue(br_offloads->wq);
 err_alloc_wq:
+	rtnl_lock();
 	mlx5_esw_bridge_cleanup(esw);
+	rtnl_unlock();
 }
 
 void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
@@ -524,7 +526,7 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
 		return;
 
 	cancel_delayed_work_sync(&br_offloads->update_work);
-	unregister_netdevice_notifier(&br_offloads->netdev_nb);
+	unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
 	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
 	unregister_switchdev_notifier(&br_offloads->nb);
 	destroy_workqueue(br_offloads->wq);
@@ -157,11 +157,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 	/* Tunnel mode */
 	if (mode == XFRM_MODE_TUNNEL) {
 		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
-		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
 		if (xo->proto == IPPROTO_IPV6)
 			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-		if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+
+		switch (xo->inner_ipproto) {
+		case IPPROTO_UDP:
 			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+			fallthrough;
+		case IPPROTO_TCP:
+			/* IP | ESP | IP | [TCP | UDP] */
+			eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+			break;
+		default:
+			break;
+		}
 		return;
 	}
 
@@ -1385,6 +1385,8 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
 {
 	struct mlx5_esw_bridge_offloads *br_offloads;
 
+	ASSERT_RTNL();
+
 	br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
 	if (!br_offloads)
 		return ERR_PTR(-ENOMEM);
@@ -1401,6 +1403,8 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
 {
 	struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
 
+	ASSERT_RTNL();
+
 	if (!br_offloads)
 		return;
 
@@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template,
 			    __field(unsigned int, used)
 			    ),
 		    TP_fast_assign(
-			    strncpy(__entry->dev_name,
+			    strscpy(__entry->dev_name,
 				    netdev_name(fdb->dev),
 				    IFNAMSIZ);
 			    memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
@@ -131,7 +131,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
 
-	del_timer(&fw_reset->timer);
+	del_timer_sync(&fw_reset->timer);
 }
 
 static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
@@ -121,12 +121,13 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
 
 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
 {
-	if (!mlx5_chains_prios_supported(chains))
-		return 1;
-
 	if (mlx5_chains_ignore_flow_level_supported(chains))
 		return UINT_MAX;
 
+	if (!chains->dev->priv.eswitch ||
+	    chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
+		return 1;
+
 	/* We should get here only for eswitch case */
 	return FDB_TC_MAX_PRIO;
 }
@@ -211,7 +212,7 @@ static int
 create_chain_restore(struct fs_chain *chain)
 {
 	struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
-	char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
+	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
 	struct mlx5_fs_chains *chains = chain->chains;
 	enum mlx5e_tc_attr_to_reg chain_to_reg;
 	struct mlx5_modify_hdr *mod_hdr;
@@ -406,23 +406,24 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
 
 	switch (module_id) {
 	case MLX5_MODULE_ID_SFP:
-		mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+		mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
 		break;
 	case MLX5_MODULE_ID_QSFP:
 	case MLX5_MODULE_ID_QSFP_PLUS:
 	case MLX5_MODULE_ID_QSFP28:
-		mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+		mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
 		break;
 	default:
 		mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
 		return -EINVAL;
 	}
 
-	if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH)
+	if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
 		/* Cross pages read, read until offset 256 in low page */
-		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+		size = MLX5_EEPROM_PAGE_LENGTH - offset;
 
 	query.size = size;
+	query.offset = offset;
 
 	return mlx5_query_mcia(dev, &query, data);
 }
@@ -1069,21 +1069,33 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
 	u32 backlog;
 	int delta;
 
-	if (!endpoint->replenish_enabled) {
+	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
 		if (add_one)
 			atomic_inc(&endpoint->replenish_saved);
 		return;
 	}
 
+	/* If already active, just update the backlog */
+	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
+		if (add_one)
+			atomic_inc(&endpoint->replenish_backlog);
+		return;
+	}
+
 	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
 		if (ipa_endpoint_replenish_one(endpoint))
 			goto try_again_later;
+
+	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+
 	if (add_one)
 		atomic_inc(&endpoint->replenish_backlog);
 
 	return;
 
 try_again_later:
+	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+
 	/* The last one didn't succeed, so fix the backlog */
 	delta = add_one ? 2 : 1;
 	backlog = atomic_add_return(delta, &endpoint->replenish_backlog);
@@ -1106,7 +1118,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 	u32 max_backlog;
 	u32 saved;
 
-	endpoint->replenish_enabled = true;
+	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
 	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
 		atomic_add(saved, &endpoint->replenish_backlog);
 
@@ -1120,7 +1132,7 @@ static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
 {
 	u32 backlog;
 
-	endpoint->replenish_enabled = false;
+	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
 	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
 		atomic_add(backlog, &endpoint->replenish_saved);
 }
@@ -1665,7 +1677,8 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
 		/* RX transactions require a single TRE, so the maximum
 		 * backlog is the same as the maximum outstanding TREs.
 		 */
-		endpoint->replenish_enabled = false;
+		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 		atomic_set(&endpoint->replenish_saved,
 			   gsi_channel_tre_max(gsi, endpoint->channel_id));
 		atomic_set(&endpoint->replenish_backlog, 0);
@@ -40,6 +40,19 @@ enum ipa_endpoint_name {
 
 #define IPA_ENDPOINT_MAX		32	/* Max supported by driver */
 
+/**
+ * enum ipa_replenish_flag:	RX buffer replenish flags
+ *
+ * @IPA_REPLENISH_ENABLED:	Whether receive buffer replenishing is enabled
+ * @IPA_REPLENISH_ACTIVE:	Whether replenishing is underway
+ * @IPA_REPLENISH_COUNT:	Number of defined replenish flags
+ */
+enum ipa_replenish_flag {
+	IPA_REPLENISH_ENABLED,
+	IPA_REPLENISH_ACTIVE,
+	IPA_REPLENISH_COUNT,	/* Number of flags (must be last) */
+};
+
 /**
  * struct ipa_endpoint - IPA endpoint information
  * @ipa:		IPA pointer
@@ -51,7 +64,7 @@ enum ipa_endpoint_name {
 * @trans_tre_max:	Maximum number of TRE descriptors per transaction
 * @evt_ring_id:	GSI event ring used by the endpoint
 * @netdev:		Network device pointer, if endpoint uses one
-* @replenish_enabled:	Whether receive buffer replenishing is enabled
+* @replenish_flags:	Replenishing state flags
 * @replenish_ready:	Number of replenish transactions without doorbell
 * @replenish_saved:	Replenish requests held while disabled
 * @replenish_backlog:	Number of buffers needed to fill hardware queue
@@ -72,7 +85,7 @@ struct ipa_endpoint {
 	struct net_device *netdev;
 
 	/* Receive buffer replenishing for RX endpoints */
-	bool replenish_enabled;
+	DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
 	u32 replenish_ready;
 	atomic_t replenish_saved;
 	atomic_t replenish_backlog;
@@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
 	if (tx_buf == NULL)
 		goto free_rx_urb;
 
-	rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+	rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
 				    GFP_KERNEL, &rx_urb->transfer_dma);
 	if (rx_buf == NULL)
 		goto free_tx_buf;
@@ -146,7 +146,7 @@ error_nomem:
 
 static void ipheth_free_urbs(struct ipheth_device *iphone)
 {
-	usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+	usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf,
 			  iphone->rx_urb->transfer_dma);
 	usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
 			  iphone->tx_urb->transfer_dma);
@@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
 
 	usb_fill_bulk_urb(dev->rx_urb, udev,
 			  usb_rcvbulkpipe(udev, dev->bulk_in),
-			  dev->rx_buf, IPHETH_BUF_SIZE,
+			  dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
 			  ipheth_rcvbulk_callback,
 			  dev);
 	dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -642,6 +642,8 @@ read_status:
 	 */
 	if (ctrl->power_fault_detected)
 		status &= ~PCI_EXP_SLTSTA_PFD;
+	else if (status & PCI_EXP_SLTSTA_PFD)
+		ctrl->power_fault_detected = true;
 
 	events |= status;
 	if (!events) {
@@ -651,7 +653,7 @@ read_status:
 	}
 
 	if (status) {
-		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);
 
 		/*
 		 * In MSI mode, all event bits must be zero before the port
@@ -725,8 +727,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
 	}
 
 	/* Check Power Fault Detected */
-	if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
-		ctrl->power_fault_detected = 1;
+	if (events & PCI_EXP_SLTSTA_PFD) {
 		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
 		pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
 				      PCI_EXP_SLTCTL_ATTN_IND_ON);
@@ -179,19 +179,21 @@ nlm_delete_file(struct nlm_file *file)
 static int nlm_unlock_files(struct nlm_file *file)
 {
 	struct file_lock lock;
-	struct file *f;
 
+	locks_init_lock(&lock);
 	lock.fl_type = F_UNLCK;
 	lock.fl_start = 0;
 	lock.fl_end = OFFSET_MAX;
-	for (f = file->f_file[0]; f <= file->f_file[1]; f++) {
-		if (f && vfs_lock_file(f, F_SETLK, &lock, NULL) < 0) {
-			pr_warn("lockd: unlock failure in %s:%d\n",
-				__FILE__, __LINE__);
-			return 1;
-		}
-	}
+	if (file->f_file[O_RDONLY] &&
+	    vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, &lock, NULL))
+		goto out_err;
+	if (file->f_file[O_WRONLY] &&
+	    vfs_lock_file(file->f_file[O_WRONLY], F_SETLK, &lock, NULL))
+		goto out_err;
 	return 0;
+out_err:
+	pr_warn("lockd: unlock failure in %s:%d\n", __FILE__, __LINE__);
+	return 1;
 }
 
 /*
@@ -611,9 +611,6 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 	if (fanotify_is_perm_event(event->mask))
 		FANOTIFY_PERM(event)->fd = fd;
 
-	if (f)
-		fd_install(fd, f);
-
 	if (info_mode) {
 		ret = copy_info_records_to_user(event, info, info_mode, pidfd,
 						buf, count);
@@ -621,6 +618,9 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 			goto out_close_fd;
 	}
 
+	if (f)
+		fd_install(fd, f);
+
 	return metadata.event_len;
 
 out_close_fd:
@@ -145,7 +145,7 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
 		if (err == -ENOTTY || err == -EINVAL)
 			return 0;
 		pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n",
-			old, err);
+			old->dentry, err);
 		return err;
 	}
 
@@ -157,7 +157,9 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
 	 */
 	if (oldfa.flags & OVL_PROT_FS_FLAGS_MASK) {
 		err = ovl_set_protattr(inode, new->dentry, &oldfa);
-		if (err)
+		if (err == -EPERM)
+			pr_warn_once("copying fileattr: no xattr on upper\n");
+		else if (err)
 			return err;
 	}
 
@@ -167,8 +169,16 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
 
 	err = ovl_real_fileattr_get(new, &newfa);
 	if (err) {
+		/*
+		 * Returning an error if upper doesn't support fileattr will
+		 * result in a regression, so revert to the old behavior.
+		 */
+		if (err == -ENOTTY || err == -EINVAL) {
+			pr_warn_once("copying fileattr: no support on upper\n");
+			return 0;
+		}
 		pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n",
-			new, err);
+			new->dentry, err);
 		return err;
 	}
 
@@ -555,6 +555,14 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
 
 	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
 
+	/*
+	 * Release agent gets called with all capabilities,
+	 * require capabilities to set release agent.
+	 */
+	if ((of->file->f_cred->user_ns != &init_user_ns) ||
+	    !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
 	cgrp = cgroup_kn_lock_live(of->kn, false);
 	if (!cgrp)
 		return -ENODEV;
@@ -966,6 +974,12 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
 		/* Specifying two release agents is forbidden */
 		if (ctx->release_agent)
 			return invalfc(fc, "release_agent respecified");
+		/*
+		 * Release agent gets called with all capabilities,
+		 * require capabilities to set release agent.
+		 */
+		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
+			return invalfc(fc, "Setting release_agent not allowed");
 		ctx->release_agent = param->string;
 		param->string = NULL;
 		break;
@@ -1610,8 +1610,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	 * Make sure that subparts_cpus is a subset of cpus_allowed.
 	 */
 	if (cs->nr_subparts_cpus) {
-		cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus,
-			       cs->cpus_allowed);
+		cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed);
 		cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
 	}
 	spin_unlock_irq(&callback_lock);
@@ -3254,8 +3254,8 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
 	unsigned char name_assign_type = NET_NAME_USER;
 	struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
-	const struct rtnl_link_ops *m_ops = NULL;
-	struct net_device *master_dev = NULL;
+	const struct rtnl_link_ops *m_ops;
+	struct net_device *master_dev;
 	struct net *net = sock_net(skb->sk);
 	const struct rtnl_link_ops *ops;
 	struct nlattr *tb[IFLA_MAX + 1];
@@ -3293,6 +3293,8 @@ replay:
 	else
 		dev = NULL;
 
+	master_dev = NULL;
+	m_ops = NULL;
 	if (dev) {
 		master_dev = netdev_master_upper_dev_get(dev);
 		if (master_dev)
@@ -1653,6 +1653,8 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	    (mss != tcp_skb_seglen(skb)))
 		goto out;
 
+	if (!tcp_skb_can_collapse(prev, skb))
+		goto out;
 	len = skb->len;
 	pcount = tcp_skb_pcount(skb);
 	if (tcp_skb_shift(prev, skb, pcount, len))
@@ -1753,7 +1753,10 @@ static int fanout_add(struct sock *sk, struct fanout_args *args)
 	err = -ENOSPC;
 	if (refcount_read(&match->sk_ref) < match->max_num_members) {
 		__dev_remove_pack(&po->prot_hook);
-		po->fanout = match;
+
+		/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
+		WRITE_ONCE(po->fanout, match);
+
 		po->rollover = rollover;
 		rollover = NULL;
 		refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
@@ -3906,7 +3909,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
 	}
 	case PACKET_FANOUT_DATA:
 	{
-		if (!po->fanout)
+		/* Paired with the WRITE_ONCE() in fanout_add() */
+		if (!READ_ONCE(po->fanout))
 			return -EINVAL;
 
 		return fanout_set_data(po, optval, optlen);
@@ -1945,9 +1945,9 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	bool prio_allocate;
 	u32 parent;
 	u32 chain_index;
-	struct Qdisc *q = NULL;
+	struct Qdisc *q;
 	struct tcf_chain_info chain_info;
-	struct tcf_chain *chain = NULL;
+	struct tcf_chain *chain;
 	struct tcf_block *block;
 	struct tcf_proto *tp;
 	unsigned long cl;
@@ -1976,6 +1976,8 @@ replay:
 	tp = NULL;
 	cl = 0;
 	block = NULL;
+	q = NULL;
+	chain = NULL;
 	flags = 0;
 
 	if (prio == 0) {
@@ -2798,8 +2800,8 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
 	struct tcmsg *t;
 	u32 parent;
 	u32 chain_index;
-	struct Qdisc *q = NULL;
-	struct tcf_chain *chain = NULL;
+	struct Qdisc *q;
+	struct tcf_chain *chain;
 	struct tcf_block *block;
 	unsigned long cl;
 	int err;
@@ -2809,6 +2811,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
 		return -EPERM;
 
 replay:
+	q = NULL;
 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
 				     rtm_tca_policy, extack);
 	if (err < 0)
@@ -75,6 +75,7 @@ init()
 
 		# let $ns2 reach any $ns1 address from any interface
 		ip -net "$ns2" route add default via 10.0.$i.1 dev ns2eth$i metric 10$i
+		ip -net "$ns2" route add default via dead:beef:$i::1 dev ns2eth$i metric 10$i
 	done
 }
 
@@ -1383,7 +1384,7 @@ ipv6_tests()
 	reset
 	ip netns exec $ns1 ./pm_nl_ctl limits 0 1
 	ip netns exec $ns2 ./pm_nl_ctl limits 0 1
	ip netns exec $ns2 ./pm_nl_ctl add dead:beef:3::2 flags subflow
	ip netns exec $ns2 ./pm_nl_ctl add dead:beef:3::2 dev ns2eth3 flags subflow
 	run_tests $ns1 $ns2 dead:beef:1::1 0 0 0 slow
 	chk_join_nr "single subflow IPv6" 1 1 1
 
@@ -1418,7 +1419,7 @@ ipv6_tests()
 	ip netns exec $ns1 ./pm_nl_ctl limits 0 2
 	ip netns exec $ns1 ./pm_nl_ctl add dead:beef:2::1 flags signal
 	ip netns exec $ns2 ./pm_nl_ctl limits 1 2
-	ip netns exec $ns2 ./pm_nl_ctl add dead:beef:3::2 flags subflow
+	ip netns exec $ns2 ./pm_nl_ctl add dead:beef:3::2 dev ns2eth3 flags subflow
 	run_tests $ns1 $ns2 dead:beef:1::1 0 -1 -1 slow
 	chk_join_nr "remove subflow and signal IPv6" 2 2 2
 	chk_add_nr 1 1