Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Use netif_rx_ni() when necessary in batman-adv stack, from Jussi
    Kivilinna.

 2) Fix loss of RTT samples in rxrpc, from David Howells.

 3) Memory leak in hns_nic_dev_probe(), from Dinghao Liu.

 4) ravb module cannot be unloaded, fix from Yuusuke Ashizuka.

 5) We disable BH for too long in sctp_get_port_local(), add a
    cond_resched() here as well, from Xin Long (a sketch of the general
    pattern follows this list).

 6) Fix memory leak in st95hf_in_send_cmd, from Dinghao Liu.

 7) Out of bound access in bpf_raw_tp_link_fill_link_info(), from
    Yonghong Song.

 8) Missing of_node_put() in mt7530 DSA driver, from Sumera
    Priyadarsini.

 9) Fix crash in bnxt_fw_reset_task(), from Michael Chan.

10) Fix geneve tunnel checksumming bug in hns3, from Yi Li.

11) Memory leak in rxkad_verify_response, from Dinghao Liu.

12) In tipc, don't use smp_processor_id() in preemptible context. From
    Tuong Lien.

13) Fix signedness issue in mlx4 memory allocation, from Shung-Hsi Yu.

14) Missing clk_disable_unprepare() in gemini driver, from Dan Carpenter.

15) Fix ABI mismatch between driver and firmware in nfp, from Louis
    Peens.
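
A hedged sketch of the cond_resched() pattern referenced in item 5
(illustrative only; the structure, helper, and lock names here are
hypothetical and are not taken from the actual sctp patch):

	/* Hypothetical illustration: walk a large table with bottom halves
	 * disabled, periodically dropping the lock so the CPU can reschedule.
	 * cond_resched() must not be called while BHs are disabled, so the
	 * lock is released first and retaken afterwards.
	 */
	static void scan_all_buckets(struct bucket_head *head, int nr_buckets)
	{
		int i;

		spin_lock_bh(&head->lock);		/* BHs disabled from here */
		for (i = 0; i < nr_buckets; i++) {
			scan_bucket(head, i);		/* hypothetical helper */
			if (need_resched()) {
				spin_unlock_bh(&head->lock);	/* re-enable BHs */
				cond_resched();			/* let others run */
				spin_lock_bh(&head->lock);	/* resume the scan */
			}
		}
		spin_unlock_bh(&head->lock);
	}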

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (110 commits)
  net/smc: fix sock refcounting in case of termination
  net/smc: reset sndbuf_desc if freed
  net/smc: set rx_off for SMCR explicitly
  net/smc: fix toleration of fake add_link messages
  tg3: Fix soft lockup when tg3_reset_task() fails.
  doc: net: dsa: Fix typo in config code sample
  net: dp83867: Fix WoL SecureOn password
  nfp: flower: fix ABI mismatch between driver and firmware
  tipc: fix shutdown() of connectionless socket
  ipv6: Fix sysctl max for fib_multipath_hash_policy
  drivers/net/wan/hdlc: Change the default of hard_header_len to 0
  net: gemini: Fix another missing clk_disable_unprepare() in probe
  net: bcmgenet: fix mask check in bcmgenet_validate_flow()
  amd-xgbe: Add support for new port mode
  net: usb: dm9601: Add USB ID of Keenetic Plus DSL
  vhost: fix typo in error message
  net: ethernet: mlx4: Fix memory allocation in mlx4_buddy_init()
  pktgen: fix error message with wrong function name
  net: ethernet: ti: am65-cpsw: fix rmii 100Mbit link mode
  cxgb4: fix thermal zone device registration
  ...
Linus Torvalds 2020-09-03 18:50:48 -07:00
commit 3e8d3bdc2a
110 changed files with 1072 additions and 603 deletions


@@ -1,4 +1,4 @@
 Distributed Switch Architecture Device Tree Bindings
 ----------------------------------------------------
 
-See Documentation/devicetree/bindings/net/dsa/dsa.yaml for the documenation.
+See Documentation/devicetree/bindings/net/dsa/dsa.yaml for the documentation.


@@ -180,7 +180,7 @@ The configuration can only be set up via VLAN tagging and bridge setup.
 # bring up the slave interfaces
 ip link set lan1 up
-ip link set lan1 up
+ip link set lan2 up
 ip link set lan3 up
 # create bridge


@@ -3389,6 +3389,7 @@ M: Florian Fainelli <f.fainelli@gmail.com>
 L: netdev@vger.kernel.org
 L: openwrt-devel@lists.openwrt.org (subscribers-only)
 S: Supported
+F: Documentation/devicetree/bindings/net/dsa/b53.txt
 F: drivers/net/dsa/b53/*
 F: include/linux/platform_data/b53.h
@@ -3574,13 +3575,28 @@ L: bcm-kernel-feedback-list@broadcom.com
 S: Maintained
 F: drivers/phy/broadcom/phy-brcm-usb*
 
+BROADCOM ETHERNET PHY DRIVERS
+M: Florian Fainelli <f.fainelli@gmail.com>
+L: bcm-kernel-feedback-list@broadcom.com
+L: netdev@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
+F: drivers/net/phy/bcm*.[ch]
+F: drivers/net/phy/broadcom.c
+F: include/linux/brcmphy.h
+
 BROADCOM GENET ETHERNET DRIVER
 M: Doug Berger <opendmb@gmail.com>
 M: Florian Fainelli <f.fainelli@gmail.com>
 L: bcm-kernel-feedback-list@broadcom.com
 L: netdev@vger.kernel.org
 S: Supported
+F: Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+F: Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
 F: drivers/net/ethernet/broadcom/genet/
+F: drivers/net/mdio/mdio-bcm-unimac.c
+F: include/linux/platform_data/bcmgenet.h
+F: include/linux/platform_data/mdio-bcm-unimac.h
 
 BROADCOM IPROC ARM ARCHITECTURE
 M: Ray Jui <rjui@broadcom.com>
@@ -6496,7 +6512,6 @@ F: net/bridge/
 ETHERNET PHY LIBRARY
 M: Andrew Lunn <andrew@lunn.ch>
-M: Florian Fainelli <f.fainelli@gmail.com>
 M: Heiner Kallweit <hkallweit1@gmail.com>
 R: Russell King <linux@armlinux.org.uk>
 L: netdev@vger.kernel.org


@@ -998,6 +998,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
 			error = make_rate (pcr, r, &tmc0, NULL);
 			if (error) {
 				kfree(tc);
+				kfree(vcc);
 				return error;
 			}
 		}


@@ -1326,14 +1326,17 @@ mt7530_setup(struct dsa_switch *ds)
 		if (phy_node->parent == priv->dev->of_node->parent) {
 			ret = of_get_phy_mode(mac_np, &interface);
-			if (ret && ret != -ENODEV)
+			if (ret && ret != -ENODEV) {
+				of_node_put(mac_np);
 				return ret;
+			}
 			id = of_mdio_parse_addr(ds->dev, phy_node);
 			if (id == 0)
 				priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
 			if (id == 4)
 				priv->p5_intf_sel = P5_INTF_SEL_PHY_P4;
 		}
+		of_node_put(mac_np);
 		of_node_put(phy_node);
 		break;
 	}
@@ -1501,7 +1504,7 @@ unsupported:
 	phylink_set(mask, 100baseT_Full);
 
 	if (state->interface != PHY_INTERFACE_MODE_MII) {
-		phylink_set(mask, 1000baseT_Half);
+		/* This switch only supports 1G full-duplex. */
 		phylink_set(mask, 1000baseT_Full);
 		if (port == 5)
 			phylink_set(mask, 1000baseX_Full);


@@ -400,6 +400,7 @@ static int felix_parse_ports_node(struct felix *felix,
 		if (err < 0) {
 			dev_err(dev, "Unsupported PHY mode %s on port %d\n",
 				phy_modes(phy_mode), port);
+			of_node_put(child);
 			return err;
 		}


@@ -3415,7 +3415,7 @@ static int sja1105_check_device_id(struct sja1105_private *priv)
 	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
 
-	for (match = sja1105_dt_ids; match->compatible; match++) {
+	for (match = sja1105_dt_ids; match->compatible[0]; match++) {
 		const struct sja1105_info *info = match->data;
 
 		/* Is what's been probed in our match table at all? */


@@ -166,6 +166,7 @@ enum xgbe_port_mode {
 	XGBE_PORT_MODE_10GBASE_T,
 	XGBE_PORT_MODE_10GBASE_R,
 	XGBE_PORT_MODE_SFP,
+	XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG,
 	XGBE_PORT_MODE_MAX,
 };
@@ -1634,6 +1635,7 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
 	if (ad_reg & 0x80) {
 		switch (phy_data->port_mode) {
 		case XGBE_PORT_MODE_BACKPLANE:
+		case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
 			mode = XGBE_MODE_KR;
 			break;
 		default:
@@ -1643,6 +1645,7 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
 	} else if (ad_reg & 0x20) {
 		switch (phy_data->port_mode) {
 		case XGBE_PORT_MODE_BACKPLANE:
+		case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
 			mode = XGBE_MODE_KX_1000;
 			break;
 		case XGBE_PORT_MODE_1000BASE_X:
@@ -1782,6 +1785,7 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
 	switch (phy_data->port_mode) {
 	case XGBE_PORT_MODE_BACKPLANE:
+	case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
 		XGBE_SET_ADV(dlks, 10000baseKR_Full);
 		break;
 	case XGBE_PORT_MODE_BACKPLANE_2500:
@@ -1874,6 +1878,7 @@ static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
 	switch (phy_data->port_mode) {
 	case XGBE_PORT_MODE_BACKPLANE:
 		return XGBE_AN_MODE_CL73;
+	case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
 	case XGBE_PORT_MODE_BACKPLANE_2500:
 		return XGBE_AN_MODE_NONE;
 	case XGBE_PORT_MODE_1000BASE_T:
@@ -2156,6 +2161,7 @@ static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
 	switch (phy_data->port_mode) {
 	case XGBE_PORT_MODE_BACKPLANE:
+	case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
 		return xgbe_phy_switch_bp_mode(pdata);
 	case XGBE_PORT_MODE_BACKPLANE_2500:
 		return xgbe_phy_switch_bp_2500_mode(pdata);
@@ -2251,6 +2257,7 @@ static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata,
 	switch (phy_data->port_mode) {
 	case XGBE_PORT_MODE_BACKPLANE:
+	case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
 		return xgbe_phy_get_bp_mode(speed);
 	case XGBE_PORT_MODE_BACKPLANE_2500:
 		return xgbe_phy_get_bp_2500_mode(speed);
@@ -2426,6 +2433,7 @@ static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
 	switch (phy_data->port_mode) {
 	case XGBE_PORT_MODE_BACKPLANE:
+	case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
 		return xgbe_phy_use_bp_mode(pdata, mode);
 	case XGBE_PORT_MODE_BACKPLANE_2500:
 		return xgbe_phy_use_bp_2500_mode(pdata, mode);
@@ -2515,6 +2523,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
 	switch (phy_data->port_mode) {
 	case XGBE_PORT_MODE_BACKPLANE:
+	case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
 		return xgbe_phy_valid_speed_bp_mode(speed);
 	case XGBE_PORT_MODE_BACKPLANE_2500:
 		return xgbe_phy_valid_speed_bp_2500_mode(speed);
@@ -2792,6 +2801,7 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
 	switch (phy_data->port_mode) {
 	case XGBE_PORT_MODE_BACKPLANE:
+	case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
 		if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
 		    (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
 			return false;
@@ -2844,6 +2854,7 @@ static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata)
 	switch (phy_data->port_mode) {
 	case XGBE_PORT_MODE_BACKPLANE:
+	case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
 	case XGBE_PORT_MODE_BACKPLANE_2500:
 		if (phy_data->conn_type == XGBE_CONN_TYPE_BACKPLANE)
 			return false;
@@ -3160,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
 	/* Backplane support */
 	case XGBE_PORT_MODE_BACKPLANE:
 		XGBE_SET_SUP(lks, Autoneg);
+		fallthrough;
+	case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
 		XGBE_SET_SUP(lks, Pause);
 		XGBE_SET_SUP(lks, Asym_Pause);
 		XGBE_SET_SUP(lks, Backplane);


@@ -153,6 +153,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
 	if (IS_ERR(data->reset_gpio)) {
 		error = PTR_ERR(data->reset_gpio);
 		dev_err(priv->dev, "Failed to request gpio: %d\n", error);
+		mdiobus_free(bus);
 		return error;
 	}


@@ -2491,8 +2491,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
 				      sizeof(struct bcm_sysport_tx_ring),
 				      GFP_KERNEL);
-	if (!priv->tx_rings)
-		return -ENOMEM;
+	if (!priv->tx_rings) {
+		ret = -ENOMEM;
+		goto err_free_netdev;
+	}
 
 	priv->is_lite = params->is_lite;
 	priv->num_rx_desc_words = params->num_rx_desc_words;


@@ -1141,6 +1141,9 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
 {
+	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
+		return;
+
 	if (BNXT_PF(bp))
 		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
 	else
@@ -1157,10 +1160,12 @@ static void bnxt_queue_sp_work(struct bnxt *bp)
 
 static void bnxt_cancel_sp_work(struct bnxt *bp)
 {
-	if (BNXT_PF(bp))
+	if (BNXT_PF(bp)) {
 		flush_workqueue(bnxt_pf_wq);
-	else
+	} else {
 		cancel_work_sync(&bp->sp_task);
+		cancel_delayed_work_sync(&bp->fw_reset_task);
+	}
 }
 
 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
@@ -6102,6 +6107,21 @@ static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
 	return cp + ulp_stat;
 }
 
+/* Check if a default RSS map needs to be setup. This function is only
+ * used on older firmware that does not require reserving RX rings.
+ */
+static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
+{
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+	/* The RSS map is valid for RX rings set to resv_rx_rings */
+	if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
+		hw_resc->resv_rx_rings = bp->rx_nr_rings;
+		if (!netif_is_rxfh_configured(bp->dev))
+			bnxt_set_dflt_rss_indir_tbl(bp);
+	}
+}
+
 static bool bnxt_need_reserve_rings(struct bnxt *bp)
 {
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -6110,22 +6130,28 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
 	int rx = bp->rx_nr_rings, stat;
 	int vnic = 1, grp = rx;
 
-	if (bp->hwrm_spec_code < 0x10601)
-		return false;
-
-	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+	if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
+	    bp->hwrm_spec_code >= 0x10601)
 		return true;
 
+	/* Old firmware does not need RX ring reservations but we still
+	 * need to setup a default RSS map when needed. With new firmware
+	 * we go through RX ring reservations first and then set up the
+	 * RSS map for the successfully reserved RX rings when needed.
+	 */
+	if (!BNXT_NEW_RM(bp)) {
+		bnxt_check_rss_tbl_no_rmgr(bp);
+		return false;
+	}
+
 	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
 		vnic = rx + 1;
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		rx <<= 1;
 	stat = bnxt_get_func_stat_ctxs(bp);
-	if (BNXT_NEW_RM(bp) &&
-	    (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
-	     hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
-	     (hw_resc->resv_hw_ring_grps != grp &&
-	      !(bp->flags & BNXT_FLAG_CHIP_P5))))
+	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
+	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
+	    (hw_resc->resv_hw_ring_grps != grp &&
+	     !(bp->flags & BNXT_FLAG_CHIP_P5)))
 		return true;
 
 	if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
 	    hw_resc->resv_irqs != nq)
@@ -6214,6 +6240,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
 	if (!tx || !rx || !cp || !grp || !vnic || !stat)
 		return -ENOMEM;
 
+	if (!netif_is_rxfh_configured(bp->dev))
+		bnxt_set_dflt_rss_indir_tbl(bp);
+
 	return rc;
 }
@@ -8495,9 +8524,6 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
 		rc = bnxt_init_int_mode(bp);
 		bnxt_ulp_irq_restart(bp, rc);
 	}
-	if (!netif_is_rxfh_configured(bp->dev))
-		bnxt_set_dflt_rss_indir_tbl(bp);
-
 	if (rc) {
 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
 		return rc;
@@ -9284,16 +9310,19 @@ static ssize_t bnxt_show_temp(struct device *dev,
 	struct hwrm_temp_monitor_query_input req = {0};
 	struct hwrm_temp_monitor_query_output *resp;
 	struct bnxt *bp = dev_get_drvdata(dev);
-	u32 temp = 0;
+	u32 len = 0;
 
 	resp = bp->hwrm_cmd_resp_addr;
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
 	mutex_lock(&bp->hwrm_cmd_lock);
-	if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
-		temp = resp->temp * 1000; /* display millidegree */
+	if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+		len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
 	mutex_unlock(&bp->hwrm_cmd_lock);
-	return sprintf(buf, "%u\n", temp);
+	if (len)
+		return len;
+
+	return sprintf(buf, "unknown\n");
 }
 
 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
@@ -9475,15 +9504,15 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 		}
 	}
 
-	bnxt_enable_napi(bp);
-	bnxt_debug_dev_init(bp);
-
 	rc = bnxt_init_nic(bp, irq_re_init);
 	if (rc) {
 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
-		goto open_err;
+		goto open_err_irq;
 	}
 
+	bnxt_enable_napi(bp);
+	bnxt_debug_dev_init(bp);
+
 	if (link_re_init) {
 		mutex_lock(&bp->link_lock);
 		rc = bnxt_update_phy_setting(bp);
@@ -9514,10 +9543,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 	bnxt_vf_reps_open(bp);
 	return 0;
 
-open_err:
-	bnxt_debug_dev_exit(bp);
-	bnxt_disable_napi(bp);
-
 open_err_irq:
 	bnxt_del_napi(bp);
@@ -11761,6 +11786,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	unregister_netdev(dev);
 	bnxt_dl_unregister(bp);
 	bnxt_shutdown_tc(bp);
+	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
 	bnxt_cancel_sp_work(bp);
 	bp->sp_event = 0;
@@ -12200,6 +12226,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (BNXT_CHIP_P5(bp))
 		bp->flags |= BNXT_FLAG_CHIP_P5;
 
+	rc = bnxt_alloc_rss_indir_tbl(bp);
+	if (rc)
+		goto init_err_pci_clean;
+
 	rc = bnxt_fw_init_one_p2(bp);
 	if (rc)
 		goto init_err_pci_clean;
@@ -12304,11 +12334,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 
-	rc = bnxt_alloc_rss_indir_tbl(bp);
-	if (rc)
-		goto init_err_pci_clean;
-	bnxt_set_dflt_rss_indir_tbl(bp);
-
 	if (BNXT_PF(bp)) {
 		if (!bnxt_pf_wq) {
 			bnxt_pf_wq =
@@ -12339,6 +12364,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
 	pcie_print_link_status(pdev);
 
+	pci_save_state(pdev);
 	return 0;
 
 init_err_cleanup:
@@ -12536,6 +12562,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 			"Cannot re-enable PCI device after reset.\n");
 	} else {
 		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
 
 		err = bnxt_hwrm_func_reset(bp);
 		if (!err) {


@@ -472,18 +472,11 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
 static int bnxt_get_num_ring_stats(struct bnxt *bp)
 {
 	int rx, tx, cmn;
-	bool sh = false;
-
-	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
-		sh = true;
 
 	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
 	     bnxt_get_num_tpa_ring_stats(bp);
 	tx = NUM_RING_TX_HW_STATS;
 	cmn = NUM_RING_CMN_SW_STATS;
-	if (sh)
-		return (rx + tx + cmn) * bp->cp_nr_rings;
-	else
-		return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
-		       cmn * bp->cp_nr_rings;
+	return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+	       cmn * bp->cp_nr_rings;
 }
@@ -806,7 +799,7 @@ static void bnxt_get_channels(struct net_device *dev,
 	int max_tx_sch_inputs;
 
 	/* Get the most up-to-date max_tx_sch_inputs. */
-	if (BNXT_NEW_RM(bp))
+	if (netif_running(dev) && BNXT_NEW_RM(bp))
 		bnxt_hwrm_func_resc_qcaps(bp, false);
 	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
@@ -2323,6 +2316,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
 	if (rc != 0)
 		return rc;
 
+	if (!dir_entries || !entry_length)
+		return -EIO;
+
 	/* Insert 2 bytes of directory info (count and size of entries) */
 	if (len < 2)
 		return -EINVAL;


@@ -1364,7 +1364,7 @@ static int bcmgenet_validate_flow(struct net_device *dev,
 	case ETHER_FLOW:
 		eth_mask = &cmd->fs.m_u.ether_spec;
 		/* don't allow mask which isn't valid */
-		if (VALIDATE_MASK(eth_mask->h_source) ||
+		if (VALIDATE_MASK(eth_mask->h_dest) ||
 		    VALIDATE_MASK(eth_mask->h_source) ||
 		    VALIDATE_MASK(eth_mask->h_proto)) {
 			netdev_err(dev, "rxnfc: Unsupported mask\n");


@@ -7221,8 +7221,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp)
 
 static inline void tg3_reset_task_cancel(struct tg3 *tp)
 {
-	cancel_work_sync(&tp->reset_task);
-	tg3_flag_clear(tp, RESET_TASK_PENDING);
+	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
+		cancel_work_sync(&tp->reset_task);
 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
 }
@@ -11209,18 +11209,27 @@ static void tg3_reset_task(struct work_struct *work)
 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 	err = tg3_init_hw(tp, true);
-	if (err)
+	if (err) {
+		tg3_full_unlock(tp);
+		tp->irq_sync = 0;
+		tg3_napi_enable(tp);
+		/* Clear this flag so that tg3_reset_task_cancel() will not
+		 * call cancel_work_sync() and wait forever.
+		 */
+		tg3_flag_clear(tp, RESET_TASK_PENDING);
+		dev_close(tp->dev);
 		goto out;
+	}
 
 	tg3_netif_start(tp);
 
-out:
 	tg3_full_unlock(tp);
 
 	if (!err)
 		tg3_phy_start(tp);
 
 	tg3_flag_clear(tp, RESET_TASK_PENDING);
+out:
 	rtnl_unlock();
 }


@@ -62,6 +62,7 @@ static struct thermal_zone_device_ops cxgb4_thermal_ops = {
 int cxgb4_thermal_init(struct adapter *adap)
 {
 	struct ch_thermal *ch_thermal = &adap->ch_thermal;
+	char ch_tz_name[THERMAL_NAME_LENGTH];
 	int num_trip = CXGB4_NUM_TRIPS;
 	u32 param, val;
 	int ret;
@@ -82,7 +83,8 @@ int cxgb4_thermal_init(struct adapter *adap)
 		ch_thermal->trip_type = THERMAL_TRIP_CRITICAL;
 	}
 
-	ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip,
+	snprintf(ch_tz_name, sizeof(ch_tz_name), "cxgb4_%s", adap->name);
+	ch_thermal->tzdev = thermal_zone_device_register(ch_tz_name, num_trip,
 							 0, adap,
 							 &cxgb4_thermal_ops,
 							 NULL, 0, 0);
@@ -105,7 +107,9 @@ int cxgb4_thermal_init(struct adapter *adap)
 int cxgb4_thermal_remove(struct adapter *adap)
 {
-	if (adap->ch_thermal.tzdev)
+	if (adap->ch_thermal.tzdev) {
 		thermal_zone_device_unregister(adap->ch_thermal.tzdev);
+		adap->ch_thermal.tzdev = NULL;
+	}
 	return 0;
 }


@@ -2446,8 +2446,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
 	port->reset = devm_reset_control_get_exclusive(dev, NULL);
 	if (IS_ERR(port->reset)) {
 		dev_err(dev, "no reset\n");
-		clk_disable_unprepare(port->pclk);
-		return PTR_ERR(port->reset);
+		ret = PTR_ERR(port->reset);
+		goto unprepare;
 	}
 	reset_control_reset(port->reset);
 	usleep_range(100, 500);
@@ -2502,13 +2502,13 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
 			       IRQF_SHARED,
 			       port_names[port->id],
 			       port);
-	if (ret) {
-		clk_disable_unprepare(port->pclk);
-		return ret;
-	}
+	if (ret)
+		goto unprepare;
 
 	ret = register_netdev(netdev);
-	if (!ret) {
+	if (ret)
+		goto unprepare;
+
 	netdev_info(netdev,
 		    "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
 		    port->irq, &dmares->start,
@@ -2518,9 +2518,9 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
 		netdev_info(netdev,
 			    "PHY init failed, deferring to ifup time\n");
 
 	return 0;
-	}
 
-	port->netdev = NULL;
+unprepare:
+	clk_disable_unprepare(port->pclk);
 	return ret;
 }


@@ -2282,8 +2282,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
 			priv->enet_ver = AE_VERSION_1;
 		else if (acpi_dev_found(hns_enet_acpi_match[1].id))
 			priv->enet_ver = AE_VERSION_2;
-		else
-			return -ENXIO;
+		else {
+			ret = -ENXIO;
+			goto out_read_prop_fail;
+		}
 
 		/* try to find port-idx-in-ae first */
 		ret = acpi_node_get_property_reference(dev->fwnode,
@@ -2299,7 +2301,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
 		priv->fwnode = args.fwnode;
 	} else {
 		dev_err(dev, "cannot read cfg data from OF or acpi\n");
-		return -ENXIO;
+		ret = -ENXIO;
+		goto out_read_prop_fail;
 	}
 
 	ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);


@@ -21,6 +21,7 @@
 #include <net/pkt_cls.h>
 #include <net/tcp.h>
 #include <net/vxlan.h>
+#include <net/geneve.h>
 
 #include "hnae3.h"
 #include "hns3_enet.h"
@@ -780,7 +781,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
  * and it is udp packet, which has a dest port as the IANA assigned.
  * the hardware is expected to do the checksum offload, but the
  * hardware will not do the checksum offload when udp dest port is
- * 4789.
+ * 4789 or 6081.
  */
 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
 {
@@ -789,7 +790,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
 	l4.hdr = skb_transport_header(skb);
 
 	if (!(!skb->encapsulation &&
-	      l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
+	      (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
+	       l4.udp->dest == htons(GENEVE_UDP_PORT))))
 		return false;
 
 	skb_checksum_help(skb);


@@ -479,6 +479,9 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 	int i, j, rc;
 	u64 *size_array;
 
+	if (!adapter->rx_pool)
+		return -1;
+
 	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
@@ -649,6 +652,9 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
 	int tx_scrqs;
 	int i, rc;
 
+	if (!adapter->tx_pool)
+		return -1;
+
 	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
 	for (i = 0; i < tx_scrqs; i++) {
 		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
@@ -2011,7 +2017,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 		    adapter->req_rx_add_entries_per_subcrq !=
 		    old_num_rx_slots ||
 		    adapter->req_tx_entries_per_subcrq !=
-		    old_num_tx_slots) {
+		    old_num_tx_slots ||
+		    !adapter->rx_pool ||
+		    !adapter->tso_pool ||
+		    !adapter->tx_pool) {
 			release_rx_pools(adapter);
 			release_tx_pools(adapter);
 			release_napi(adapter);
@@ -2024,10 +2033,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 		} else {
 			rc = reset_tx_pools(adapter);
 			if (rc)
+				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
+					   rc);
 				goto out;
 
 			rc = reset_rx_pools(adapter);
 			if (rc)
+				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
+					   rc);
 				goto out;
 		}
 		ibmvnic_disable_irqs(adapter);


@@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
 		goto err_out;
 
 	for (i = 0; i <= buddy->max_order; ++i) {
-		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
+		s = BITS_TO_LONGS(1UL << (buddy->max_order - i));
 		buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
 		if (!buddy->bits[i])
 			goto err_out_free;


@@ -61,6 +61,7 @@ struct nfp_tun_active_tuns {
  * @flags:		options part of the request
  * @tun_info.ipv6:	dest IPv6 address of active route
  * @tun_info.egress_port: port the encapsulated packet egressed
+ * @tun_info.extra:	reserved for future use
  * @tun_info:		tunnels that have sent traffic in reported period
  */
 struct nfp_tun_active_tuns_v6 {
@@ -70,6 +71,7 @@ struct nfp_tun_active_tuns_v6 {
 	struct route_ip_info_v6 {
 		struct in6_addr ipv6;
 		__be32 egress_port;
+		__be32 extra[2];
 	} tun_info[];
 };


@@ -496,9 +496,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
 	struct ionic_cq *txcq;
 	u32 rx_work_done = 0;
 	u32 tx_work_done = 0;
-	u32 work_done = 0;
 	u32 flags = 0;
-	bool unmask;
 
 	lif = rxcq->bound_q->lif;
 	idev = &lif->ionic->idev;
@@ -512,17 +510,12 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
 	if (rx_work_done)
 		ionic_rx_fill_cb(rxcq->bound_q);
 
-	unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget);
-
-	if (unmask && napi_complete_done(napi, rx_work_done)) {
+	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
 		flags |= IONIC_INTR_CRED_UNMASK;
 		DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
-		work_done = rx_work_done;
-	} else {
-		work_done = budget;
 	}
 
-	if (work_done || flags) {
+	if (rx_work_done || flags) {
 		flags |= IONIC_INTR_CRED_RESET_COALESCE;
 		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
 				   tx_work_done + rx_work_done, flags);
@@ -531,7 +524,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
 	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
 	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
 
-	return work_done;
+	return rx_work_done;
 }
 
 static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,


@@ -1342,6 +1342,51 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
 	return error;
 }
 
+/* MDIO bus init function */
+static int ravb_mdio_init(struct ravb_private *priv)
+{
+	struct platform_device *pdev = priv->pdev;
+	struct device *dev = &pdev->dev;
+	int error;
+
+	/* Bitbang init */
+	priv->mdiobb.ops = &bb_ops;
+
+	/* MII controller setting */
+	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
+	if (!priv->mii_bus)
+		return -ENOMEM;
+
+	/* Hook up MII support for ethtool */
+	priv->mii_bus->name = "ravb_mii";
+	priv->mii_bus->parent = dev;
+	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		 pdev->name, pdev->id);
+
+	/* Register MDIO bus */
+	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+	if (error)
+		goto out_free_bus;
+
+	return 0;
+
+out_free_bus:
+	free_mdio_bitbang(priv->mii_bus);
+	return error;
+}
+
+/* MDIO bus release function */
+static int ravb_mdio_release(struct ravb_private *priv)
+{
+	/* Unregister mdio bus */
+	mdiobus_unregister(priv->mii_bus);
+
+	/* Free bitbang info */
+	free_mdio_bitbang(priv->mii_bus);
+
+	return 0;
+}
+
 /* Network device open function for Ethernet AVB */
 static int ravb_open(struct net_device *ndev)
 {
@@ -1350,6 +1395,13 @@ static int ravb_open(struct net_device *ndev)
 	struct device *dev = &pdev->dev;
 	int error;
 
+	/* MDIO bus init */
+	error = ravb_mdio_init(priv);
+	if (error) {
+		netdev_err(ndev, "failed to initialize MDIO\n");
+		return error;
+	}
+
 	napi_enable(&priv->napi[RAVB_BE]);
 	napi_enable(&priv->napi[RAVB_NC]);
@@ -1427,6 +1479,7 @@ out_free_irq:
 out_napi_off:
 	napi_disable(&priv->napi[RAVB_NC]);
 	napi_disable(&priv->napi[RAVB_BE]);
+	ravb_mdio_release(priv);
 	return error;
 }
@@ -1736,6 +1789,8 @@ static int ravb_close(struct net_device *ndev)
 	ravb_ring_free(ndev, RAVB_BE);
 	ravb_ring_free(ndev, RAVB_NC);
 
+	ravb_mdio_release(priv);
+
 	return 0;
 }
@@ -1887,51 +1942,6 @@ static const struct net_device_ops ravb_netdev_ops = {
 	.ndo_set_features	= ravb_set_features,
 };
 
-/* MDIO bus init function */
-static int ravb_mdio_init(struct ravb_private *priv)
-{
-	struct platform_device *pdev = priv->pdev;
-	struct device *dev = &pdev->dev;
-	int error;
-
-	/* Bitbang init */
-	priv->mdiobb.ops = &bb_ops;
-
-	/* MII controller setting */
-	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
-	if (!priv->mii_bus)
-		return -ENOMEM;
-
-	/* Hook up MII support for ethtool */
-	priv->mii_bus->name = "ravb_mii";
-	priv->mii_bus->parent = dev;
-	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-		 pdev->name, pdev->id);
-
-	/* Register MDIO bus */
-	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
-	if (error)
-		goto out_free_bus;
-
-	return 0;
-
-out_free_bus:
-	free_mdio_bitbang(priv->mii_bus);
-	return error;
-}
-
-/* MDIO bus release function */
-static int ravb_mdio_release(struct ravb_private *priv)
-{
-	/* Unregister mdio bus */
-	mdiobus_unregister(priv->mii_bus);
-
-	/* Free bitbang info */
-	free_mdio_bitbang(priv->mii_bus);
-
-	return 0;
-}
-
 static const struct of_device_id ravb_match_table[] = {
 	{ .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
 	{ .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
@@ -2174,13 +2184,6 @@ static int ravb_probe(struct platform_device *pdev)
 		eth_hw_addr_random(ndev);
 	}
 
-	/* MDIO bus init */
-	error = ravb_mdio_init(priv);
-	if (error) {
-		dev_err(&pdev->dev, "failed to initialize MDIO\n");
-		goto out_dma_free;
-	}
-
 	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
 	netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
@@ -2202,8 +2205,6 @@ static int ravb_probe(struct platform_device *pdev)
 out_napi_del:
 	netif_napi_del(&priv->napi[RAVB_NC]);
 	netif_napi_del(&priv->napi[RAVB_BE]);
-	ravb_mdio_release(priv);
-out_dma_free:
 	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
 			  priv->desc_bat_dma);
@@ -2235,7 +2236,6 @@ static int ravb_remove(struct platform_device *pdev)
 	unregister_netdev(ndev);
 	netif_napi_del(&priv->napi[RAVB_NC]);
 	netif_napi_del(&priv->napi[RAVB_BE]);
-	ravb_mdio_release(priv);
 	pm_runtime_disable(&pdev->dev);
 	free_netdev(ndev);
 	platform_set_drvdata(pdev, NULL);


@@ -36,7 +36,7 @@ bool ef100_rx_buf_hash_valid(const u8 *prefix)
 	return PREFIX_FIELD(prefix, RSS_HASH_VALID);
 }
 
-static bool check_fcs(struct efx_channel *channel, u32 *prefix)
+static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
 {
 	u16 rxclass;
 	u8 l2status;
@@ -46,11 +46,11 @@ static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
 
 	if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK))
 		/* Everything is ok */
-		return 0;
+		return false;
 
 	if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR)
 		channel->n_rx_eth_crc_err++;
-	return 1;
+	return true;
 }
 
 void __ef100_rx_packet(struct efx_channel *channel)
@@ -63,7 +63,7 @@ void __ef100_rx_packet(struct efx_channel *channel)
 
 	prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
 
-	if (check_fcs(channel, prefix) &&
+	if (ef100_has_fcs_error(channel, prefix) &&
 	    unlikely(!(efx->net_dev->features & NETIF_F_RXALL)))
 		goto out;


@@ -174,6 +174,8 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
 		if (phy->speed == 10 && phy_interface_is_rgmii(phy))
 			/* Can be used with in band mode only */
 			mac_control |= CPSW_SL_CTL_EXT_EN;
+		if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII)
+			mac_control |= CPSW_SL_CTL_IFCTL_A;
 		if (phy->duplex)
 			mac_control |= CPSW_SL_CTL_FULLDUPLEX;


@@ -1116,7 +1116,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 				  HOST_PORT_NUM, ALE_VLAN, vid);
 	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
 				  0, ALE_VLAN, vid);
-	ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
+	ret |= cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
 err:
 	pm_runtime_put(cpsw->dev);
 	return ret;


@@ -1032,19 +1032,34 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 		return ret;
 	}
 
+	/* reset the return code as pm_runtime_get_sync() can return
+	 * non zero values as well.
+	 */
+	ret = 0;
 	for (i = 0; i < cpsw->data.slaves; i++) {
 		if (cpsw->slaves[i].ndev &&
-		    vid == cpsw->slaves[i].port_vlan)
+		    vid == cpsw->slaves[i].port_vlan) {
+			ret = -EINVAL;
 			goto err;
+		}
 	}
 
 	dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
-	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
-	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
-			   HOST_PORT_NUM, ALE_VLAN, vid);
-	cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
-			   0, ALE_VLAN, vid);
-	cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
+	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+	if (ret)
+		dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
+	ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+				 HOST_PORT_NUM, ALE_VLAN, vid);
+	if (ret)
+		dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
+			ret);
+	ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+				 0, ALE_VLAN, vid);
+	if (ret)
+		dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
+			ret);
+	cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
+	ret = 0;
 err:
 	pm_runtime_put(cpsw->dev);
 	return ret;


@@ -1179,6 +1179,7 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
 		goto nlmsg_failure;
 
 	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
+	    nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
 	    nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
 	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
 		goto nla_put_failure;


@@ -215,9 +215,9 @@ static int dp83867_set_wol(struct phy_device *phydev,
 	if (wol->wolopts & WAKE_MAGICSECURE) {
 		phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
 			      (wol->sopass[1] << 8) | wol->sopass[0]);
-		phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
+		phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP2,
 			      (wol->sopass[3] << 8) | wol->sopass[2]);
-		phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
+		phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP3,
 			      (wol->sopass[5] << 8) | wol->sopass[4]);
 
 		val_rxcfg |= DP83867_WOL_SEC_EN;


@@ -427,18 +427,18 @@ static int dp83869_config_init(struct phy_device *phydev)
 			return ret;
 
 		val = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL);
+		val &= ~(DP83869_RGMII_TX_CLK_DELAY_EN |
+			 DP83869_RGMII_RX_CLK_DELAY_EN);
+
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
 			val |= (DP83869_RGMII_TX_CLK_DELAY_EN |
 				DP83869_RGMII_RX_CLK_DELAY_EN);
 
-		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
-			val &= ~(DP83869_RGMII_TX_CLK_DELAY_EN |
-				 DP83869_RGMII_RX_CLK_DELAY_EN);
-
 		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
-			val |= DP83869_RGMII_TX_CLK_DELAY_EN;
+			val &= ~DP83869_RGMII_TX_CLK_DELAY_EN;
 
 		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
-			val |= DP83869_RGMII_RX_CLK_DELAY_EN;
+			val &= ~DP83869_RGMII_RX_CLK_DELAY_EN;
 
 		ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL,
 				    val);


@@ -252,6 +252,7 @@ config USB_NET_CDC_EEM
 config USB_NET_CDC_NCM
 	tristate "CDC NCM support"
 	depends on USB_USBNET
+	select USB_NET_CDCETHER
 	default y
 	help
 	  This driver provides support for CDC NCM (Network Control Model


@@ -296,7 +296,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal)
 
 	netdev_dbg(dev->net, "asix_get_phy_addr()\n");
 
-	if (ret < 0) {
+	if (ret < 2) {
 		netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
 		goto out;
 	}


@@ -625,6 +625,10 @@ static const struct usb_device_id products[] = {
 	 USB_DEVICE(0x0a46, 0x1269),	/* DM9621A USB to Fast Ethernet Adapter */
 	 .driver_info = (unsigned long)&dm9601_info,
 	},
+	{
+	 USB_DEVICE(0x0586, 0x3427),	/* ZyXEL Keenetic Plus DSL xDSL modem */
+	 .driver_info = (unsigned long)&dm9601_info,
+	},
 	{},			// END
 };


@@ -229,7 +229,7 @@ static void hdlc_setup_dev(struct net_device *dev)
 	dev->min_mtu		 = 68;
 	dev->max_mtu		 = HDLC_MAX_MTU;
 	dev->type		 = ARPHRD_RAWHDLC;
-	dev->hard_header_len	 = 16;
+	dev->hard_header_len	 = 0;
 	dev->needed_headroom	 = 0;
 	dev->addr_len		 = 0;
 	dev->header_ops		 = &hdlc_null_ops;


@@ -370,6 +370,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
 		memcpy(&state(hdlc)->settings, &new_settings, size);
 		spin_lock_init(&state(hdlc)->lock);
 		dev->header_ops = &cisco_header_ops;
+		dev->hard_header_len = sizeof(struct hdlc_header);
 		dev->type = ARPHRD_CISCO;
 		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
 		netif_dormant_on(dev);


@@ -210,6 +210,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
 
 	skb->dev = dev = lapbeth->ethdev;
 
+	skb_reset_network_header(skb);
+
 	dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
 
 	dev_queue_xmit(skb);
@@ -340,6 +342,7 @@ static int lapbeth_new_device(struct net_device *dev)
 	 */
 	ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
 					   + dev->needed_headroom;
+	ndev->needed_tailroom = dev->needed_tailroom;
 
 	lapbeth = netdev_priv(ndev);
 	lapbeth->axdev = ndev;


@@ -966,7 +966,7 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev,
 	rc = down_killable(&stcontext->exchange_lock);
 	if (rc) {
 		WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n");
-		return rc;
+		goto free_skb_resp;
 	}
 
 	rc = st95hf_spi_send(&stcontext->spicontext, skb->data,


@@ -2537,7 +2537,7 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		r = vhost_update_used_flags(vq);
 		if (r)
-			vq_err(vq, "Failed to enable notification at %p: %d\n",
+			vq_err(vq, "Failed to disable notification at %p: %d\n",
 			       &vq->used->flags, r);
 	}
 }
} }

View file

@ -161,8 +161,8 @@ responded:
} }
} }
rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
if (rtt_us < server->probe.rtt) { rtt_us < server->probe.rtt) {
server->probe.rtt = rtt_us; server->probe.rtt = rtt_us;
server->rtt = rtt_us; server->rtt = rtt_us;
alist->preferred = index; alist->preferred = index;


@@ -401,22 +401,24 @@ struct afs_vlserver {
 #define AFS_VLSERVER_FL_PROBED	0		/* The VL server has been probed */
 #define AFS_VLSERVER_FL_PROBING	1		/* VL server is being probed */
 #define AFS_VLSERVER_FL_IS_YFS	2		/* Server is YFS not AFS */
+#define AFS_VLSERVER_FL_RESPONDING 3		/* VL server is responding */
 	rwlock_t		lock;		/* Lock on addresses */
 	atomic_t		usage;
+	unsigned int		rtt;		/* Server's current RTT in uS */
 
 	/* Probe state */
 	wait_queue_head_t	probe_wq;
 	atomic_t		probe_outstanding;
 	spinlock_t		probe_lock;
 	struct {
-		unsigned int	rtt;		/* RTT as ktime/64 */
+		unsigned int	rtt;		/* RTT in uS */
 		u32		abort_code;
 		short		error;
-		bool		have_result;
-		bool		responded:1;
-		bool		is_yfs:1;
-		bool		not_yfs:1;
-		bool		local_failure:1;
+		unsigned short	flags;
+#define AFS_VLSERVER_PROBE_RESPONDED	0x01	/* At least once response (may be abort) */
+#define AFS_VLSERVER_PROBE_IS_YFS	0x02	/* The peer appears to be YFS */
+#define AFS_VLSERVER_PROBE_NOT_YFS	0x04	/* The peer appears not to be YFS */
+#define AFS_VLSERVER_PROBE_LOCAL_FAILURE 0x08	/* A local failure prevented a probe */
 	} probe;
 
 	u16			port;


@@ -310,6 +310,11 @@ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
 			   alist->preferred == i ? '>' : '-',
 			   &alist->addrs[i].transport);
 	}
+	seq_printf(m, " info: fl=%lx rtt=%d\n", vlserver->flags, vlserver->rtt);
+	seq_printf(m, " probe: fl=%x e=%d ac=%d out=%d\n",
+		   vlserver->probe.flags, vlserver->probe.error,
+		   vlserver->probe.abort_code,
+		   atomic_read(&vlserver->probe_outstanding));
 	return 0;
 }


@@ -21,6 +21,7 @@ struct afs_vlserver *afs_alloc_vlserver(const char *name, size_t name_len,
 		rwlock_init(&vlserver->lock);
 		init_waitqueue_head(&vlserver->probe_wq);
 		spin_lock_init(&vlserver->probe_lock);
+		vlserver->rtt = UINT_MAX;
 		vlserver->name_len = name_len;
 		vlserver->port = port;
 		memcpy(vlserver->name, name, name_len);


@@ -11,15 +11,33 @@
 #include "internal.h"
 #include "protocol_yfs.h"
 
-static bool afs_vl_probe_done(struct afs_vlserver *server)
-{
-	if (!atomic_dec_and_test(&server->probe_outstanding))
-		return false;
-
-	wake_up_var(&server->probe_outstanding);
+/*
+ * Handle the completion of a set of probes.
+ */
+static void afs_finished_vl_probe(struct afs_vlserver *server)
+{
+	if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) {
+		server->rtt = UINT_MAX;
+		clear_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags);
+	}
+
 	clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags);
 	wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING);
-	return true;
+}
+
+/*
+ * Handle the completion of a probe RPC call.
+ */
+static void afs_done_one_vl_probe(struct afs_vlserver *server, bool wake_up)
+{
+	if (atomic_dec_and_test(&server->probe_outstanding)) {
+		afs_finished_vl_probe(server);
+		wake_up = true;
+	}
+
+	if (wake_up)
+		wake_up_all(&server->probe_wq);
 }
 
 /*
@@ -45,15 +63,20 @@ void afs_vlserver_probe_result(struct afs_call *call)
 		server->probe.error = 0;
 		goto responded;
 	case -ECONNABORTED:
-		if (!server->probe.responded) {
+		if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) {
 			server->probe.abort_code = call->abort_code;
 			server->probe.error = ret;
 		}
 		goto responded;
 	case -ENOMEM:
 	case -ENONET:
-		server->probe.local_failure = true;
-		afs_io_error(call, afs_io_error_vl_probe_fail);
+	case -EKEYEXPIRED:
+	case -EKEYREVOKED:
+	case -EKEYREJECTED:
+		server->probe.flags |= AFS_VLSERVER_PROBE_LOCAL_FAILURE;
+		if (server->probe.error == 0)
+			server->probe.error = ret;
+		trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail);
 		goto out;
 	case -ECONNRESET: /* Responded, but call expired. */
 	case -ERFKILL:
@@ -67,12 +90,12 @@ void afs_vlserver_probe_result(struct afs_call *call)
 	default:
 		clear_bit(index, &alist->responded);
 		set_bit(index, &alist->failed);
-		if (!server->probe.responded &&
+		if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) &&
 		    (server->probe.error == 0 ||
 		     server->probe.error == -ETIMEDOUT ||
 		     server->probe.error == -ETIME))
 			server->probe.error = ret;
-		afs_io_error(call, afs_io_error_vl_probe_fail);
+		trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail);
 		goto out;
 	}
 
@@ -81,39 +104,36 @@ responded:
 	clear_bit(index, &alist->failed);
 
 	if (call->service_id == YFS_VL_SERVICE) {
-		server->probe.is_yfs = true;
+		server->probe.flags |= AFS_VLSERVER_PROBE_IS_YFS;
 		set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
 		alist->addrs[index].srx_service = call->service_id;
 	} else {
-		server->probe.not_yfs = true;
-		if (!server->probe.is_yfs) {
+		server->probe.flags |= AFS_VLSERVER_PROBE_NOT_YFS;
+		if (!(server->probe.flags & AFS_VLSERVER_PROBE_IS_YFS)) {
 			clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
 			alist->addrs[index].srx_service = call->service_id;
 		}
 	}
 
-	rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
-	if (rtt_us < server->probe.rtt) {
+	if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
+	    rtt_us < server->probe.rtt) {
 		server->probe.rtt = rtt_us;
+		server->rtt = rtt_us;
 		alist->preferred = index;
-		have_result = true;
 	}
 
 	smp_wmb(); /* Set rtt before responded. */
-	server->probe.responded = true;
+	server->probe.flags |= AFS_VLSERVER_PROBE_RESPONDED;
 	set_bit(AFS_VLSERVER_FL_PROBED, &server->flags);
+	set_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags);
+	have_result = true;
 out:
 	spin_unlock(&server->probe_lock);
 
 	_debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
 	       server_index, index, &alist->addrs[index].transport, rtt_us, ret);
 
-	have_result |= afs_vl_probe_done(server);
-	if (have_result) {
-		server->probe.have_result = true;
-		wake_up_var(&server->probe.have_result);
-		wake_up_all(&server->probe_wq);
-	}
+	afs_done_one_vl_probe(server, have_result);
 }
 
 /*
@@ -151,11 +171,10 @@ static bool afs_do_probe_vlserver(struct afs_net *net,
 			in_progress = true;
 		} else {
 			afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code);
+			afs_done_one_vl_probe(server, false);
 		}
 	}
 
-	if (!in_progress)
-		afs_vl_probe_done(server);
 	return in_progress;
 }
 
@@ -193,7 +212,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist,
 {
 	struct wait_queue_entry *waits;
 	struct afs_vlserver *server;
-	unsigned int rtt = UINT_MAX;
+	unsigned int rtt = UINT_MAX, rtt_s;
 	bool have_responders = false;
 	int pref = -1, i;
 
@@ -205,7 +224,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist,
 			server = vllist->servers[i].server;
 			if (!test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
 				__clear_bit(i, &untried);
-			if (server->probe.responded)
+			if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)
 				have_responders = true;
 		}
 	}
@@ -231,7 +250,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist,
 		for (i = 0; i < vllist->nr_servers; i++) {
 			if (test_bit(i, &untried)) {
 				server = vllist->servers[i].server;
-				if (server->probe.responded)
+				if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)
 					goto stop;
 				if (test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
 					still_probing = true;
@@ -249,10 +268,11 @@ stop:
 	for (i = 0; i < vllist->nr_servers; i++) {
 		if (test_bit(i, &untried)) {
 			server = vllist->servers[i].server;
-			if (server->probe.responded &&
-			    server->probe.rtt < rtt) {
+			rtt_s = READ_ONCE(server->rtt);
+			if (test_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags) &&
+			    rtt_s < rtt) {
 				pref = i;
-				rtt = server->probe.rtt;
+				rtt = rtt_s;
 			}
 
 			remove_wait_queue(&server->probe_wq, &waits[i]);

View file

@@ -192,7 +192,8 @@ pick_server:
 	for (i = 0; i < vc->server_list->nr_servers; i++) {
 		struct afs_vlserver *s = vc->server_list->servers[i].server;
 
-		if (!test_bit(i, &vc->untried) || !s->probe.responded)
+		if (!test_bit(i, &vc->untried) ||
+		    !test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags))
 			continue;
 		if (s->probe.rtt < rtt) {
 			vc->index = i;
@@ -262,10 +263,14 @@ no_more_servers:
 	for (i = 0; i < vc->server_list->nr_servers; i++) {
 		struct afs_vlserver *s = vc->server_list->servers[i].server;
 
+		if (test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags))
+			e.responded = true;
 		afs_prioritise_error(&e, READ_ONCE(s->probe.error),
 				     s->probe.abort_code);
 	}
 
+	error = e.error;
+
 failed_set_error:
 	vc->error = error;
 failed:

View file

@@ -9,6 +9,8 @@ struct ip_ct_sctp {
 	enum sctp_conntrack state;
 	__be32 vtag[IP_CT_DIR_MAX];
+	u8 last_dir;
+	u8 flags;
 };
 
 #endif /* _NF_CONNTRACK_SCTP_H */

View file

@@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group);
 int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
 		   unsigned int group, int echo, gfp_t flags);
 int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
-		      int flags);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid);
 
 static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
 {

View file

@@ -71,7 +71,7 @@
  * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
  *			  TCP or UDP packets over IPv6. These are specifically
  *			  unencapsulated packets of the form IPv6|TCP or
- *			  IPv4|UDP where the Next Header field in the IPv6
+ *			  IPv6|UDP where the Next Header field in the IPv6
  *			  header is either TCP or UDP. IPv6 extension headers
  *			  are not supported with this feature. This feature
  *			  cannot be set in features for a device with
@@ -1056,7 +1056,16 @@ void kfree_skb(struct sk_buff *skb);
 void kfree_skb_list(struct sk_buff *segs);
 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
 void skb_tx_error(struct sk_buff *skb);
+
+#ifdef CONFIG_TRACEPOINTS
 void consume_skb(struct sk_buff *skb);
+#else
+static inline void consume_skb(struct sk_buff *skb)
+{
+	return kfree_skb(skb);
+}
+#endif
+
 void __consume_stateless_skb(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
@@ -2658,7 +2667,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
  *
  * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
  * to reduce average number of cache lines per packet.
- * get_rps_cpus() for example only access one 64 bytes aligned block :
+ * get_rps_cpu() for example only access one 64 bytes aligned block :
  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
 #ifndef NET_SKB_PAD

View file

@@ -59,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
 void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
 void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
 			   struct sockaddr_rxrpc *);
-u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *);
 int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
 			       rxrpc_user_attach_call_t, unsigned long, gfp_t,
 			       unsigned int);
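The signature change above means a caller can no longer read the smoothed RTT unconditionally: the returned bool now reports whether rxrpc has actually gathered RTT samples for the call. A minimal sketch of the new calling pattern (variable names here are illustrative, not taken from the tree):

	u32 rtt_us;

	/* Use the value only when rxrpc confirms it holds real samples. */
	if (rxrpc_kernel_get_srtt(sock, rxcall, &rtt_us) && rtt_us < best_rtt)
		best_rtt = rtt_us;

This is the shape the afs probe code earlier in this pull adopts.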

View file

@@ -494,7 +494,7 @@ int igmp6_event_report(struct sk_buff *skb);
 #ifdef CONFIG_SYSCTL
 int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
-			       void __user *buffer, size_t *lenp, loff_t *ppos);
+			       void *buffer, size_t *lenp, loff_t *ppos);
 int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
 				 void __user *oldval, size_t __user *oldlenp,
 				 void __user *newval, size_t newlen);

View file

@@ -143,6 +143,8 @@ static inline u64 nft_reg_load64(const u32 *sreg)
 static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
 				 unsigned int len)
 {
+	if (len % NFT_REG32_SIZE)
+		dst[len / NFT_REG32_SIZE] = 0;
 	memcpy(dst, src, len);
 }
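The two added lines guard against leaking stale register contents: when len is not a multiple of NFT_REG32_SIZE, the memcpy() only partially overwrites the last 32-bit register. A worked example, assuming NFT_REG32_SIZE == 4:

	/* len == 6: memcpy() writes bytes 0..5, so the second register is
	 * only half written; clearing dst[6 / 4] == dst[1] first makes its
	 * upper two bytes deterministically zero instead of stale data.
	 */
	if (len % NFT_REG32_SIZE)
		dst[len / NFT_REG32_SIZE] = 0;
	memcpy(dst, src, len);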

View file

@@ -138,11 +138,16 @@ enum rxrpc_recvmsg_trace {
 };
 
 enum rxrpc_rtt_tx_trace {
+	rxrpc_rtt_tx_cancel,
 	rxrpc_rtt_tx_data,
+	rxrpc_rtt_tx_no_slot,
 	rxrpc_rtt_tx_ping,
 };
 
 enum rxrpc_rtt_rx_trace {
+	rxrpc_rtt_rx_cancel,
+	rxrpc_rtt_rx_lost,
+	rxrpc_rtt_rx_obsolete,
 	rxrpc_rtt_rx_ping_response,
 	rxrpc_rtt_rx_requested_ack,
 };
@@ -339,10 +344,15 @@ enum rxrpc_tx_point {
 	E_(rxrpc_recvmsg_wait, "WAIT")
 
 #define rxrpc_rtt_tx_traces \
+	EM(rxrpc_rtt_tx_cancel, "CNCE") \
 	EM(rxrpc_rtt_tx_data, "DATA") \
+	EM(rxrpc_rtt_tx_no_slot, "FULL") \
 	E_(rxrpc_rtt_tx_ping, "PING")
 
 #define rxrpc_rtt_rx_traces \
+	EM(rxrpc_rtt_rx_cancel, "CNCL") \
+	EM(rxrpc_rtt_rx_obsolete, "OBSL") \
+	EM(rxrpc_rtt_rx_lost, "LOST") \
 	EM(rxrpc_rtt_rx_ping_response, "PONG") \
 	E_(rxrpc_rtt_rx_requested_ack, "RACK")
@@ -1087,38 +1097,43 @@ TRACE_EVENT(rxrpc_recvmsg,
 TRACE_EVENT(rxrpc_rtt_tx,
 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
-		     rxrpc_serial_t send_serial),
-	    TP_ARGS(call, why, send_serial),
+		     int slot, rxrpc_serial_t send_serial),
+	    TP_ARGS(call, why, slot, send_serial),
 
 	    TP_STRUCT__entry(
 		    __field(unsigned int, call )
 		    __field(enum rxrpc_rtt_tx_trace, why )
+		    __field(int, slot )
 		    __field(rxrpc_serial_t, send_serial )
 			     ),
 
 	    TP_fast_assign(
 		    __entry->call = call->debug_id;
 		    __entry->why = why;
+		    __entry->slot = slot;
 		    __entry->send_serial = send_serial;
 			   ),
 
-	    TP_printk("c=%08x %s sr=%08x",
+	    TP_printk("c=%08x [%d] %s sr=%08x",
 		      __entry->call,
+		      __entry->slot,
 		      __print_symbolic(__entry->why, rxrpc_rtt_tx_traces),
 		      __entry->send_serial)
 	    );
 
 TRACE_EVENT(rxrpc_rtt_rx,
 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+		     int slot,
 		     rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
 		     u32 rtt, u32 rto),
-	    TP_ARGS(call, why, send_serial, resp_serial, rtt, rto),
+	    TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto),
 
 	    TP_STRUCT__entry(
 		    __field(unsigned int, call )
 		    __field(enum rxrpc_rtt_rx_trace, why )
+		    __field(int, slot )
 		    __field(rxrpc_serial_t, send_serial )
 		    __field(rxrpc_serial_t, resp_serial )
 		    __field(u32, rtt )
@@ -1128,14 +1143,16 @@ TRACE_EVENT(rxrpc_rtt_rx,
 	    TP_fast_assign(
 		    __entry->call = call->debug_id;
 		    __entry->why = why;
+		    __entry->slot = slot;
 		    __entry->send_serial = send_serial;
 		    __entry->resp_serial = resp_serial;
 		    __entry->rtt = rtt;
 		    __entry->rto = rto;
			   ),
 
-	    TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u",
+	    TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u rto=%u",
 		      __entry->call,
+		      __entry->slot,
 		      __print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
 		      __entry->send_serial,
 		      __entry->resp_serial,

View file

@@ -135,7 +135,7 @@ struct in_addr {
  * this socket to prevent accepting spoofed ones.
  */
 #define IP_PMTUDISC_INTERFACE		4
-/* weaker version of IP_PMTUDISC_INTERFACE, which allos packets to get
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
  * fragmented if they exeed the interface mtu
  */
 #define IP_PMTUDISC_OMIT		5

View file

@@ -133,7 +133,7 @@ enum nf_tables_msg_types {
  * @NFTA_LIST_ELEM: list element (NLA_NESTED)
  */
 enum nft_list_attributes {
-	NFTA_LIST_UNPEC,
+	NFTA_LIST_UNSPEC,
 	NFTA_LIST_ELEM,
 	__NFTA_LIST_MAX
 };

View file

@@ -2634,7 +2634,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
 		u32 ulen = info->raw_tracepoint.tp_name_len;
 		size_t tp_len = strlen(tp_name);
 
-		if (ulen && !ubuf)
+		if (!ulen ^ !ubuf)
 			return -EINVAL;
 
 		info->raw_tracepoint.tp_name_len = tp_len + 1;
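The old test only rejected a length with no buffer; the reworked `!ulen ^ !ubuf` also rejects a buffer with no length. Read it as "exactly one of the two is unset", sketched here as a truth table in comments:

	/* !ulen ^ !ubuf:
	 *   ulen == 0 && ubuf != NULL -> 1 ^ 0 -> reject
	 *   ulen != 0 && ubuf == NULL -> 0 ^ 1 -> reject (the old check
	 *                                        caught only this case)
	 *   both set, or both unset   -> 0     -> proceed
	 */
	if (!ulen ^ !ubuf)
		return -EINVAL;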

View file

@@ -204,8 +204,7 @@ static int max_extfrag_threshold = 1000;
 
 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
 static int bpf_stats_handler(struct ctl_table *table, int write,
-			     void __user *buffer, size_t *lenp,
-			     loff_t *ppos)
+			     void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct static_key *key = (struct static_key *)table->data;
 	static int saved_val;

View file

@@ -881,6 +881,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
 		   ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
 		   ogm_packet->version, ntohs(ogm_packet->tvlv_len));
 
+	if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "Drop packet: originator packet from ourself\n");
+		return;
+	}
+
 	/* If the throughput metric is 0, immediately drop the packet. No need
 	 * to create orig_node / neigh_node for an unusable route.
 	 */
@@ -1008,11 +1014,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
 	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
 		goto free_skb;
 
-	ogm_packet = (struct batadv_ogm2_packet *)skb->data;
-
-	if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
-		goto free_skb;
-
 	batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
 	batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
 			   skb->len + ETH_HLEN);

View file

@@ -437,7 +437,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
 	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
 			   skb->len + ETH_HLEN);
 
-	netif_rx(skb);
+	if (in_interrupt())
+		netif_rx(skb);
+	else
+		netif_rx_ni(skb);
 out:
 	if (primary_if)
 		batadv_hardif_put(primary_if);
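The in_interrupt() test matters because netif_rx() expects to run from interrupt (or bottom-half-disabled) context, while netif_rx_ni() performs the local-bh handling itself for process-context callers; calling plain netif_rx() from process context can leave the raised softirq unserviced. The generic pattern, independent of batman-adv:

	/* Hand an skb to the network stack from either context. */
	if (in_interrupt())
		netif_rx(skb);
	else
		netif_rx_ni(skb);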

View file

@@ -703,8 +703,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
 	chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
 	/* store the client address if the message is going to a client */
-	if (ret == BATADV_DHCP_TO_CLIENT &&
-	    pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
+	if (ret == BATADV_DHCP_TO_CLIENT) {
+		if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
+			return BATADV_DHCP_NO;
+
 		/* check if the DHCP packet carries an Ethernet DHCP */
 		p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
 		if (*p != BATADV_DHCP_HTYPE_ETHERNET)

View file

@@ -116,7 +116,7 @@ static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
 	if (segmented) {
 		if (rfml->incomplete_frm == NULL) {
 			/* Initial Segment */
-			if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
+			if (cfpkt_peek_head(pkt, rfml->seghead, 6) != 0)
 				goto out;
 
 			rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
@@ -233,7 +233,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
 		err = cfpkt_peek_head(pkt, head, 6);
 
-	if (err < 0)
+	if (err != 0)
 		goto out;
 
 	while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {

View file

@@ -6612,12 +6612,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
 				weight);
 	napi->weight = weight;
-	list_add(&napi->dev_list, &dev->napi_list);
 	napi->dev = dev;
 #ifdef CONFIG_NETPOLL
 	napi->poll_owner = -1;
 #endif
 	set_bit(NAPI_STATE_SCHED, &napi->state);
+	set_bit(NAPI_STATE_NPSVC, &napi->state);
+	list_add_rcu(&napi->dev_list, &dev->napi_list);
 	napi_hash_add(napi);
 }
 EXPORT_SYMBOL(netif_napi_add);

View file

@@ -162,7 +162,7 @@ static void poll_napi(struct net_device *dev)
 	struct napi_struct *napi;
 	int cpu = smp_processor_id();
 
-	list_for_each_entry(napi, &dev->napi_list, dev_list) {
+	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
 		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
 			poll_one_napi(napi);
 			smp_store_release(&napi->poll_owner, -1);

View file

@@ -3699,7 +3699,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
 				   cpu_to_node(cpu),
 				   "kpktgend_%d", cpu);
 	if (IS_ERR(p)) {
-		pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
+		pr_err("kthread_create_on_node() failed for cpu %d\n", t->cpu);
 		list_del(&t->th_list);
 		kfree(t);
 		return PTR_ERR(p);

View file

@@ -820,6 +820,7 @@ void skb_tx_error(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(skb_tx_error);
 
+#ifdef CONFIG_TRACEPOINTS
 /**
  *	consume_skb - free an skbuff
  *	@skb: buffer to free
@@ -837,6 +838,7 @@ void consume_skb(struct sk_buff *skb)
 	__kfree_skb(skb);
 }
 EXPORT_SYMBOL(consume_skb);
+#endif
 
 /**
  *	consume_stateless_skb - free an skbuff, assuming it is stateless

View file

@@ -3254,7 +3254,7 @@ void sk_common_release(struct sock *sk)
 		sk->sk_prot->destroy(sk);
 
 	/*
-	 * Observation: when sock_common_release is called, processes have
+	 * Observation: when sk_common_release is called, processes have
 	 * no access to socket. But net still has.
 	 * Step one, detach it from networking:
 	 *

View file

@@ -2121,7 +2121,8 @@ void fib_info_notify_update(struct net *net, struct nl_info *info)
 		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
 		struct fib_table *tb;
 
-		hlist_for_each_entry_rcu(tb, head, tb_hlist)
+		hlist_for_each_entry_rcu(tb, head, tb_hlist,
+					 lockdep_rtnl_is_held())
 			__fib_info_notify_update(net, tb, info);
 	}
 }

View file

@@ -3,7 +3,7 @@
  * nf_nat_pptp.c
  *
  * NAT support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
+ * PPTP is a protocol for creating virtual private networks.
  * It is a specification defined by Microsoft and some vendors
 * working with Microsoft. PPTP is built on top of a modified
 * version of the Internet Generic Routing Encapsulation Protocol.

View file

@@ -610,7 +610,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	} else if (!ipc.oif) {
 		ipc.oif = inet->uc_index;
 	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
-		/* oif is set, packet is to local broadcast and
+		/* oif is set, packet is to local broadcast
 		 * and uc_index is set. oif is most likely set
 		 * by sk_bound_dev_if. If uc_index != oif check if the
 		 * oif is an L3 master and uc_index is an L3 slave.

View file

@@ -21,6 +21,7 @@
 #include <net/calipso.h>
 #endif
 
+static int two = 2;
 static int flowlabel_reflect_max = 0x7;
 static int auto_flowlabels_min;
 static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
@@ -150,7 +151,7 @@ static struct ctl_table ipv6_table_template[] = {
 		.mode = 0644,
 		.proc_handler = proc_rt6_multipath_hash_policy,
 		.extra1 = SYSCTL_ZERO,
-		.extra2 = SYSCTL_ONE,
+		.extra2 = &two,
 	},
 	{
 		.procname = "seg6_flowlabel",

View file

@@ -154,7 +154,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
 EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu);
 
 /**
- * l3mdev_fib_table - get FIB table id associated with an L3
+ * l3mdev_fib_table_rcu - get FIB table id associated with an L3
  *		      master interface
  * @dev: targeted interface
 */

View file

@@ -405,18 +405,14 @@ ieee80211_calc_legacy_rate_duration(u16 bitrate, bool short_pre,
 	return duration;
 }
 
-u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
-			      struct ieee80211_rx_status *status,
-			      int len)
+static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
+				       struct ieee80211_rx_status *status,
+				       u32 *overhead)
 {
-	struct ieee80211_supported_band *sband;
-	const struct ieee80211_rate *rate;
 	bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI;
-	bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
 	int bw, streams;
 	int group, idx;
 	u32 duration;
-	bool cck;
 
 	switch (status->bw) {
 	case RATE_INFO_BW_20:
@@ -437,20 +433,6 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
 	}
 
 	switch (status->encoding) {
-	case RX_ENC_LEGACY:
-		if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
-			return 0;
-
-		sband = hw->wiphy->bands[status->band];
-		if (!sband || status->rate_idx >= sband->n_bitrates)
-			return 0;
-
-		rate = &sband->bitrates[status->rate_idx];
-		cck = rate->flags & IEEE80211_RATE_MANDATORY_B;
-
-		return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp,
-							   cck, len);
-
 	case RX_ENC_VHT:
 		streams = status->nss;
 		idx = status->rate_idx;
@@ -477,51 +459,144 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
 	duration = airtime_mcs_groups[group].duration[idx];
 	duration <<= airtime_mcs_groups[group].shift;
+	*overhead = 36 + (streams << 2);
+
+	return duration;
+}
+
+u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
+			      struct ieee80211_rx_status *status,
+			      int len)
+{
+	struct ieee80211_supported_band *sband;
+	u32 duration, overhead = 0;
+
+	if (status->encoding == RX_ENC_LEGACY) {
+		const struct ieee80211_rate *rate;
+		bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
+		bool cck;
+
+		if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
+			return 0;
+
+		sband = hw->wiphy->bands[status->band];
+		if (!sband || status->rate_idx >= sband->n_bitrates)
+			return 0;
+
+		rate = &sband->bitrates[status->rate_idx];
+		cck = rate->flags & IEEE80211_RATE_MANDATORY_B;
+
+		return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp,
+							   cck, len);
+	}
+
+	duration = ieee80211_get_rate_duration(hw, status, &overhead);
+	if (!duration)
+		return 0;
+
 	duration *= len;
 	duration /= AVG_PKT_SIZE;
 	duration /= 1024;
-	duration += 36 + (streams << 2);
-
-	return duration;
+	return duration + overhead;
 }
 EXPORT_SYMBOL_GPL(ieee80211_calc_rx_airtime);
 
-static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw,
-					  struct ieee80211_tx_rate *rate,
-					  u8 band, int len)
+static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw,
+				     struct ieee80211_rx_status *stat, u8 band,
+				     struct rate_info *ri)
 {
-	struct ieee80211_rx_status stat = {
-		.band = band,
-	};
+	struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
+	int i;
 
-	if (rate->idx < 0 || !rate->count)
+	if (!ri || !sband)
+		return false;
+
+	stat->bw = ri->bw;
+	stat->nss = ri->nss;
+	stat->rate_idx = ri->mcs;
+
+	if (ri->flags & RATE_INFO_FLAGS_HE_MCS)
+		stat->encoding = RX_ENC_HE;
+	else if (ri->flags & RATE_INFO_FLAGS_VHT_MCS)
+		stat->encoding = RX_ENC_VHT;
+	else if (ri->flags & RATE_INFO_FLAGS_MCS)
+		stat->encoding = RX_ENC_HT;
+	else
+		stat->encoding = RX_ENC_LEGACY;
+
+	if (ri->flags & RATE_INFO_FLAGS_SHORT_GI)
+		stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+	stat->he_gi = ri->he_gi;
+
+	if (stat->encoding != RX_ENC_LEGACY)
+		return true;
+
+	stat->rate_idx = 0;
+	for (i = 0; i < sband->n_bitrates; i++) {
+		if (ri->legacy != sband->bitrates[i].bitrate)
+			continue;
+
+		stat->rate_idx = i;
+		return true;
+	}
+
+	return false;
+}
+
+static int ieee80211_fill_rx_status(struct ieee80211_rx_status *stat,
+				    struct ieee80211_hw *hw,
+				    struct ieee80211_tx_rate *rate,
+				    struct rate_info *ri, u8 band, int len)
+{
+	memset(stat, 0, sizeof(*stat));
+	stat->band = band;
+
+	if (ieee80211_fill_rate_info(hw, stat, band, ri))
 		return 0;
 
+	if (rate->idx < 0 || !rate->count)
+		return -1;
+
 	if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
-		stat.bw = RATE_INFO_BW_80;
+		stat->bw = RATE_INFO_BW_80;
 	else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-		stat.bw = RATE_INFO_BW_40;
+		stat->bw = RATE_INFO_BW_40;
 	else
-		stat.bw = RATE_INFO_BW_20;
+		stat->bw = RATE_INFO_BW_20;
 
-	stat.enc_flags = 0;
+	stat->enc_flags = 0;
 	if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
-		stat.enc_flags |= RX_ENC_FLAG_SHORTPRE;
+		stat->enc_flags |= RX_ENC_FLAG_SHORTPRE;
 	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
-		stat.enc_flags |= RX_ENC_FLAG_SHORT_GI;
+		stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
 
-	stat.rate_idx = rate->idx;
+	stat->rate_idx = rate->idx;
 	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
-		stat.encoding = RX_ENC_VHT;
-		stat.rate_idx = ieee80211_rate_get_vht_mcs(rate);
-		stat.nss = ieee80211_rate_get_vht_nss(rate);
+		stat->encoding = RX_ENC_VHT;
+		stat->rate_idx = ieee80211_rate_get_vht_mcs(rate);
+		stat->nss = ieee80211_rate_get_vht_nss(rate);
 	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
-		stat.encoding = RX_ENC_HT;
+		stat->encoding = RX_ENC_HT;
 	} else {
-		stat.encoding = RX_ENC_LEGACY;
+		stat->encoding = RX_ENC_LEGACY;
 	}
 
+	return 0;
+}
+
+static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw,
+					  struct ieee80211_tx_rate *rate,
+					  struct rate_info *ri,
+					  u8 band, int len)
+{
+	struct ieee80211_rx_status stat;
+
+	if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len))
+		return 0;
+
 	return ieee80211_calc_rx_airtime(hw, &stat, len);
 }
 
@@ -536,7 +611,7 @@ u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
 		struct ieee80211_tx_rate *rate = &info->status.rates[i];
 		u32 cur_duration;
 
-		cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate,
+		cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate, NULL,
 							      info->band, len);
 		if (!cur_duration)
 			break;
@@ -572,26 +647,41 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
 	if (pubsta) {
 		struct sta_info *sta = container_of(pubsta, struct sta_info,
 						    sta);
+		struct ieee80211_rx_status stat;
 		struct ieee80211_tx_rate *rate = &sta->tx_stats.last_rate;
-		u32 airtime;
+		struct rate_info *ri = &sta->tx_stats.last_rate_info;
+		u32 duration, overhead;
+		u8 agg_shift;
 
-		if (!(rate->flags & (IEEE80211_TX_RC_VHT_MCS |
-				     IEEE80211_TX_RC_MCS)))
-			ampdu = false;
+		if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len))
+			return 0;
+
+		if (stat.encoding == RX_ENC_LEGACY || !ampdu)
+			return ieee80211_calc_rx_airtime(hw, &stat, len);
+
+		duration = ieee80211_get_rate_duration(hw, &stat, &overhead);
 
 		/*
 		 * Assume that HT/VHT transmission on any AC except VO will
-		 * use aggregation. Since we don't have reliable reporting
-		 * of aggregation length, assume an average of 16.
+		 * use aggregation. Since we don't have reliable reporting
+		 * of aggregation length, assume an average size based on the
+		 * tx rate.
 		 * This will not be very accurate, but much better than simply
-		 * assuming un-aggregated tx.
+		 * assuming un-aggregated tx in all cases.
 		 */
-		airtime = ieee80211_calc_tx_airtime_rate(hw, rate, band,
-							 ampdu ? len * 16 : len);
-		if (ampdu)
-			airtime /= 16;
+		if (duration > 400) /* <= VHT20 MCS2 1S */
+			agg_shift = 1;
+		else if (duration > 250) /* <= VHT20 MCS3 1S or MCS1 2S */
+			agg_shift = 2;
+		else if (duration > 150) /* <= VHT20 MCS5 1S or MCS3 2S */
+			agg_shift = 3;
+		else
+			agg_shift = 4;
 
-		return airtime;
+		duration *= len;
+		duration /= AVG_PKT_SIZE;
+		duration /= 1024;
+
+		return duration + (overhead >> agg_shift);
 	}
 
 	if (!conf)
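The duration thresholds above encode a guessed A-MPDU size that shrinks as the per-AVG_PKT_SIZE duration grows; the fixed per-PPDU overhead is then divided by 2^agg_shift. The same mapping as a standalone sketch (threshold values copied from the hunk; the helper name is made up for illustration):

	/* Faster rates amortise the per-PPDU overhead over more subframes. */
	static u8 est_agg_shift(u32 duration)
	{
		if (duration > 400)	/* <= VHT20 MCS2 1S */
			return 1;	/* assume ~2 subframes */
		if (duration > 250)	/* <= VHT20 MCS3 1S or MCS1 2S */
			return 2;	/* ~4 subframes */
		if (duration > 150)	/* <= VHT20 MCS5 1S or MCS3 2S */
			return 3;	/* ~8 subframes */
		return 4;		/* ~16 subframes */
	}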

View file

@@ -524,7 +524,7 @@ struct ieee80211_sta_rx_stats {
 * @status_stats.retry_failed: # of frames that failed after retry
 * @status_stats.retry_count: # of retries attempted
 * @status_stats.lost_packets: # of lost packets
- * @status_stats.last_tdls_pkt_time: timestamp of last TDLS packet
+ * @status_stats.last_pkt_time: timestamp of last ACKed packet
 * @status_stats.msdu_retries: # of MSDU retries
 * @status_stats.msdu_failed: # of failed MSDUs
 * @status_stats.last_ack: last ack timestamp (jiffies)
@@ -597,7 +597,7 @@ struct sta_info {
 		unsigned long filtered;
 		unsigned long retry_failed, retry_count;
 		unsigned int lost_packets;
-		unsigned long last_tdls_pkt_time;
+		unsigned long last_pkt_time;
 		u64 msdu_retries[IEEE80211_NUM_TIDS + 1];
 		u64 msdu_failed[IEEE80211_NUM_TIDS + 1];
 		unsigned long last_ack;
@@ -611,6 +611,7 @@ struct sta_info {
 		u64 packets[IEEE80211_NUM_ACS];
 		u64 bytes[IEEE80211_NUM_ACS];
 		struct ieee80211_tx_rate last_rate;
+		struct rate_info last_rate_info;
 		u64 msdu[IEEE80211_NUM_TIDS + 1];
 	} tx_stats;
 	u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];

View file

@@ -755,12 +755,16 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
 *  - current throughput (higher value for higher tpt)?
 */
 #define STA_LOST_PKT_THRESHOLD	50
+#define STA_LOST_PKT_TIME	HZ /* 1 sec since last ACK */
 #define STA_LOST_TDLS_PKT_THRESHOLD	10
 #define STA_LOST_TDLS_PKT_TIME	(10*HZ) /* 10secs since last ACK */
 
 static void ieee80211_lost_packet(struct sta_info *sta,
 				  struct ieee80211_tx_info *info)
 {
+	unsigned long pkt_time = STA_LOST_PKT_TIME;
+	unsigned int pkt_thr = STA_LOST_PKT_THRESHOLD;
+
 	/* If driver relies on its own algorithm for station kickout, skip
 	 * mac80211 packet loss mechanism.
 	 */
@@ -773,21 +777,20 @@ static void ieee80211_lost_packet(struct sta_info *sta,
 		return;
 
 	sta->status_stats.lost_packets++;
-	if (!sta->sta.tdls &&
-	    sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD)
-		return;
+	if (sta->sta.tdls) {
+		pkt_time = STA_LOST_TDLS_PKT_TIME;
+		pkt_thr = STA_LOST_PKT_THRESHOLD;
+	}
 
 	/*
 	 * If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD
 	 * of the last packets were lost, and that no ACK was received in the
 	 * last STA_LOST_TDLS_PKT_TIME ms, before triggering the CQM packet-loss
 	 * mechanism.
+	 * For non-TDLS, use STA_LOST_PKT_THRESHOLD and STA_LOST_PKT_TIME
 	 */
-	if (sta->sta.tdls &&
-	    (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
-	     time_before(jiffies,
-			 sta->status_stats.last_tdls_pkt_time +
-			 STA_LOST_TDLS_PKT_TIME)))
+	if (sta->status_stats.lost_packets < pkt_thr ||
+	    !time_after(jiffies, sta->status_stats.last_pkt_time + pkt_time))
 		return;
 
 	cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
@@ -1033,9 +1036,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
 				sta->status_stats.lost_packets = 0;
 
 				/* Track when last TDLS packet was ACKed */
-				if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
-					sta->status_stats.last_tdls_pkt_time =
-						jiffies;
+				sta->status_stats.last_pkt_time = jiffies;
 			} else if (noack_success) {
 				/* nothing to do here, do not account as lost */
 			} else {
@@ -1137,9 +1138,17 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
 	struct ieee80211_tx_info *info = status->info;
 	struct ieee80211_sta *pubsta = status->sta;
 	struct ieee80211_supported_band *sband;
+	struct sta_info *sta;
 	int retry_count;
 	bool acked, noack_success;
 
+	if (pubsta) {
+		sta = container_of(pubsta, struct sta_info, sta);
+
+		if (status->rate)
+			sta->tx_stats.last_rate_info = *status->rate;
+	}
+
 	if (status->skb)
 		return __ieee80211_tx_status(hw, status);
 
@@ -1154,10 +1163,6 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
 	noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);
 
 	if (pubsta) {
-		struct sta_info *sta;
-
-		sta = container_of(pubsta, struct sta_info, sta);
-
 		if (!acked && !noack_success)
 			sta->status_stats.retry_failed++;
 		sta->status_stats.retry_count += retry_count;
@@ -1168,9 +1173,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
 			if (sta->status_stats.lost_packets)
 				sta->status_stats.lost_packets = 0;
 
-			/* Track when last TDLS packet was ACKed */
-			if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
-				sta->status_stats.last_tdls_pkt_time = jiffies;
+			/* Track when last packet was ACKed */
+			sta->status_stats.last_pkt_time = jiffies;
 		} else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
 			return;
 		} else if (noack_success) {
@@ -1259,8 +1263,7 @@ void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
 			if (sta->status_stats.lost_packets)
 				sta->status_stats.lost_packets = 0;
 
-			if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
-				sta->status_stats.last_tdls_pkt_time = jiffies;
+			sta->status_stats.last_pkt_time = jiffies;
 		} else {
 			ieee80211_lost_packet(sta, info);
 		}

View file

@@ -891,7 +891,6 @@ restart:
 			goto out;
 		}
 
-wait_for_sndbuf:
 	__mptcp_flush_join_list(msk);
 	ssk = mptcp_subflow_get_send(msk);
 	while (!sk_stream_memory_free(sk) ||
@@ -981,7 +980,7 @@ wait_for_sndbuf:
 				 */
 				mptcp_set_timeout(sk, ssk);
 				release_sock(ssk);
-				goto wait_for_sndbuf;
+				goto restart;
 			}
 		}
 	}

View file

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
 * Connection tracking support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
+ * PPTP is a protocol for creating virtual private networks.
 * It is a specification defined by Microsoft and some vendors
 * working with Microsoft. PPTP is built on top of a modified
 * version of the Internet Generic Routing Encapsulation Protocol.

View file

@@ -62,6 +62,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
 	[SCTP_CONNTRACK_HEARTBEAT_ACKED]	= 210 SECS,
 };
 
+#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED	1
+
 #define sNO SCTP_CONNTRACK_NONE
 #define sCL SCTP_CONNTRACK_CLOSED
 #define sCW SCTP_CONNTRACK_COOKIE_WAIT
@@ -369,6 +371,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 	u_int32_t offset, count;
 	unsigned int *timeouts;
 	unsigned long map[256 / sizeof(unsigned long)] = { 0 };
+	bool ignore = false;
 
 	if (sctp_error(skb, dataoff, state))
 		return -NF_ACCEPT;
@@ -427,15 +430,39 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 			/* Sec 8.5.1 (D) */
 			if (sh->vtag != ct->proto.sctp.vtag[dir])
 				goto out_unlock;
-		} else if (sch->type == SCTP_CID_HEARTBEAT ||
-			   sch->type == SCTP_CID_HEARTBEAT_ACK) {
+		} else if (sch->type == SCTP_CID_HEARTBEAT) {
+			if (ct->proto.sctp.vtag[dir] == 0) {
+				pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
+				ct->proto.sctp.vtag[dir] = sh->vtag;
+			} else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
+				if (test_bit(SCTP_CID_DATA, map) || ignore)
+					goto out_unlock;
+
+				ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+				ct->proto.sctp.last_dir = dir;
+				ignore = true;
+				continue;
+			} else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+				ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+			}
+		} else if (sch->type == SCTP_CID_HEARTBEAT_ACK) {
 			if (ct->proto.sctp.vtag[dir] == 0) {
 				pr_debug("Setting vtag %x for dir %d\n",
 					 sh->vtag, dir);
 				ct->proto.sctp.vtag[dir] = sh->vtag;
 			} else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
-				pr_debug("Verification tag check failed\n");
-				goto out_unlock;
+				if (test_bit(SCTP_CID_DATA, map) || ignore)
+					goto out_unlock;
+
+				if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 ||
+				    ct->proto.sctp.last_dir == dir)
+					goto out_unlock;
+
+				ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+				ct->proto.sctp.vtag[dir] = sh->vtag;
+				ct->proto.sctp.vtag[!dir] = 0;
+			} else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+				ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
 			}
 		}
@@ -470,6 +497,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 	}
 	spin_unlock_bh(&ct->lock);
 
+	/* allow but do not refresh timeout */
+	if (ignore)
+		return NF_ACCEPT;
+
 	timeouts = nf_ct_timeout_lookup(ct);
 	if (!timeouts)
 		timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;

View file

@@ -1152,7 +1152,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
 	    && (old_state == TCP_CONNTRACK_SYN_RECV
 		|| old_state == TCP_CONNTRACK_ESTABLISHED)
 	    && new_state == TCP_CONNTRACK_ESTABLISHED) {
-		/* Set ASSURED if we see see valid ack in ESTABLISHED
+		/* Set ASSURED if we see valid ack in ESTABLISHED
 		   after SYN_RECV or a valid answer for a picked up
 		   connection. */
 		set_bit(IPS_ASSURED_BIT, &ct->status);

View file

@@ -81,18 +81,6 @@ static bool udp_error(struct sk_buff *skb,
 	return false;
 }
 
-static void nf_conntrack_udp_refresh_unreplied(struct nf_conn *ct,
-					       struct sk_buff *skb,
-					       enum ip_conntrack_info ctinfo,
-					       u32 extra_jiffies)
-{
-	if (unlikely(ctinfo == IP_CT_ESTABLISHED_REPLY &&
-		     ct->status & IPS_NAT_CLASH))
-		nf_ct_kill(ct);
-	else
-		nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies);
-}
-
 /* Returns verdict for packet, and may modify conntracktype */
 int nf_conntrack_udp_packet(struct nf_conn *ct,
 			    struct sk_buff *skb,
@@ -124,12 +112,15 @@ int nf_conntrack_udp_packet(struct nf_conn *ct,
 		nf_ct_refresh_acct(ct, ctinfo, skb, extra);
 
+		/* never set ASSURED for IPS_NAT_CLASH, they time out soon */
+		if (unlikely((ct->status & IPS_NAT_CLASH)))
+			return NF_ACCEPT;
+
 		/* Also, more likely to be important, and not a probe */
 		if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
 			nf_conntrack_event_cache(IPCT_ASSURED, ct);
 	} else {
-		nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
-						   timeouts[UDP_CT_UNREPLIED]);
+		nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
 	}
 	return NF_ACCEPT;
 }
@@ -206,12 +197,15 @@ int nf_conntrack_udplite_packet(struct nf_conn *ct,
 	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
 		nf_ct_refresh_acct(ct, ctinfo, skb,
 				   timeouts[UDP_CT_REPLIED]);
 
+		if (unlikely((ct->status & IPS_NAT_CLASH)))
+			return NF_ACCEPT;
+
 		/* Also, more likely to be important, and not a probe */
 		if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
 			nf_conntrack_event_cache(IPCT_ASSURED, ct);
 	} else {
-		nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
-						   timeouts[UDP_CT_UNREPLIED]);
+		nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
 	}
 	return NF_ACCEPT;
 }

View file

@@ -815,11 +815,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
 					nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
 					family, table);
 	if (err < 0)
-		goto err;
+		goto err_fill_table_info;
 
-	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+	return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
 
-err:
+err_fill_table_info:
 	kfree_skb(skb2);
 	return err;
 }
@@ -1563,11 +1563,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
 					nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
 					family, table, chain);
 	if (err < 0)
-		goto err;
+		goto err_fill_chain_info;
 
-	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+	return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
 
-err:
+err_fill_chain_info:
 	kfree_skb(skb2);
 	return err;
 }
@@ -3008,11 +3008,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
 				       nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
 				       family, table, chain, rule, NULL);
 	if (err < 0)
-		goto err;
+		goto err_fill_rule_info;
 
-	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+	return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
 
-err:
+err_fill_rule_info:
 	kfree_skb(skb2);
 	return err;
 }
@@ -3770,7 +3770,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
 			goto nla_put_failure;
 	}
 
-	if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
+	if (set->udata &&
+	    nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
 		goto nla_put_failure;
 
 	nest = nla_nest_start_noflag(skb, NFTA_SET_DESC);
@@ -3967,11 +3968,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
 
 	err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
 	if (err < 0)
-		goto err;
+		goto err_fill_set_info;
 
-	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+	return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
 
-err:
+err_fill_set_info:
 	kfree_skb(skb2);
 	return err;
 }
@@ -4859,24 +4860,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 	err = -ENOMEM;
 	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
 	if (skb == NULL)
-		goto err1;
+		return err;
 
 	err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid,
 					  NFT_MSG_NEWSETELEM, 0, set, &elem);
 	if (err < 0)
-		goto err2;
+		goto err_fill_setelem;
 
-	err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT);
-	/* This avoids a loop in nfnetlink. */
-	if (err < 0)
-		goto err1;
+	return nfnetlink_unicast(skb, ctx->net, ctx->portid);
 
-	return 0;
-err2:
+err_fill_setelem:
 	kfree_skb(skb);
-err1:
-	/* this avoids a loop in nfnetlink. */
-	return err == -EAGAIN ? -ENOBUFS : err;
+	return err;
 }
 
 /* called with rcu_read_lock held */
@@ -6181,10 +6176,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
 				      nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
 				      family, table, obj, reset);
 	if (err < 0)
-		goto err;
+		goto err_fill_obj_info;
 
-	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+	return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
 
-err:
+err_fill_obj_info:
 	kfree_skb(skb2);
 	return err;
 }
@@ -7044,10 +7040,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
 					    NFT_MSG_NEWFLOWTABLE, 0, family,
 					    flowtable, &flowtable->hook_list);
 	if (err < 0)
-		goto err;
+		goto err_fill_flowtable_info;
 
-	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+	return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
 
-err:
+err_fill_flowtable_info:
 	kfree_skb(skb2);
 	return err;
 }
@@ -7233,10 +7230,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk,
 	err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
 				      nlh->nlmsg_seq);
 	if (err < 0)
-		goto err;
+		goto err_fill_gen_info;
 
-	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+	return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
 
-err:
+err_fill_gen_info:
 	kfree_skb(skb2);
 	return err;
 }

View file

@@ -149,10 +149,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
 }
 EXPORT_SYMBOL_GPL(nfnetlink_set_err);
 
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
-		      int flags)
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
 {
-	return netlink_unicast(net->nfnl, skb, portid, flags);
+	int err;
+
+	err = nlmsg_unicast(net->nfnl, skb, portid);
+	if (err == -EAGAIN)
+		err = -ENOBUFS;
+
+	return err;
 }
 EXPORT_SYMBOL_GPL(nfnetlink_unicast);
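Dropping the flags argument lets every netlink GET handler above share one error convention: nlmsg_unicast() reports a full receiver socket buffer as -EAGAIN, which the nfnetlink core treats as "replay the request", so the helper rewrites it to -ENOBUFS once instead of each caller doing so. The helper above is equivalent to this sketch:

	int err = nlmsg_unicast(net->nfnl, skb, portid);

	/* -EAGAIN here means the peer's receive buffer is full; report
	 * -ENOBUFS instead so nfnetlink does not loop re-running the
	 * request.
	 */
	return err == -EAGAIN ? -ENOBUFS : err;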

View file

@@ -356,8 +356,7 @@ __nfulnl_send(struct nfulnl_instance *inst)
 			goto out;
 		}
 	}
-	nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
-			  MSG_DONTWAIT);
+	nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid);
 out:
 	inst->qlen = 0;
 	inst->skb = NULL;

View file

@@ -681,7 +681,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
 	*packet_id_ptr = htonl(entry->id);
 
 	/* nfnetlink_unicast will either free the nskb or add it to a socket */
-	err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
+	err = nfnetlink_unicast(nskb, net, queue->peer_portid);
 	if (err < 0) {
 		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
 			failopen = 1;

View file

@@ -102,7 +102,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 	}
 
 	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
-	    ct->status & IPS_SEQ_ADJUST)
+	    ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
 		goto out;
 
 	if (!nf_ct_is_confirmed(ct))

View file

@@ -87,7 +87,9 @@ void nft_payload_eval(const struct nft_expr *expr,
 	u32 *dest = &regs->data[priv->dreg];
 	int offset;
 
-	dest[priv->len / NFT_REG32_SIZE] = 0;
+	if (priv->len % NFT_REG32_SIZE)
+		dest[priv->len / NFT_REG32_SIZE] = 0;
+
 	switch (priv->base) {
 	case NFT_PAYLOAD_LL_HEADER:
 		if (!skb_mac_header_was_set(skb))

View file

@@ -218,11 +218,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 			       struct nft_rbtree_elem *new,
 			       struct nft_set_ext **ext)
 {
+	bool overlap = false, dup_end_left = false, dup_end_right = false;
 	struct nft_rbtree *priv = nft_set_priv(set);
 	u8 genmask = nft_genmask_next(net);
 	struct nft_rbtree_elem *rbe;
 	struct rb_node *parent, **p;
-	bool overlap = false;
 	int d;
 
 	/* Detect overlaps as we descend the tree. Set the flag in these cases:
@@ -238,24 +238,44 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 	 *
 	 * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
 	 * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
-	 * b3. _ _ ___! >|_ _ __|  (insert start after existing end)
+	 * b3. _ _ ___! >|_ _ __|  (insert start after existing end, as a leaf)
+	 *            '--' no nodes falling in this range
+	 * b4.          >|_ _   !  (insert start before existing start)
 	 *
 	 * Case a3. resolves to b3.:
 	 * - if the inserted start element is the leftmost, because the '0'
 	 *   element in the tree serves as end element
-	 * - otherwise, if an existing end is found. Note that end elements are
-	 *   always inserted after corresponding start elements.
+	 * - otherwise, if an existing end is found immediately to the left. If
+	 *   there are existing nodes in between, we need to further descend the
+	 *   tree before we can conclude the new start isn't causing an overlap
+	 *
+	 * or to b4., which, preceded by a3., means we already traversed one or
+	 * more existing intervals entirely, from the right.
 	 *
 	 * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
 	 * in that order.
 	 *
 	 * The flag is also cleared in two special cases:
 	 *
-	 * b4. |__ _ _!|<_ _ _   (insert start right before existing end)
-	 * b5. |__ _ >|!__ _ _   (insert end right after existing start)
+	 * b5. |__ _ _!|<_ _ _   (insert start right before existing end)
+	 * b6. |__ _ >|!__ _ _   (insert end right after existing start)
 	 *
 	 * which always happen as last step and imply that no further
 	 * overlapping is possible.
+	 *
+	 * Another special case comes from the fact that start elements matching
+	 * an already existing start element are allowed: insertion is not
+	 * performed but we return -EEXIST in that case, and the error will be
+	 * cleared by the caller if NLM_F_EXCL is not present in the request.
+	 * This way, request for insertion of an exact overlap isn't reported as
+	 * error to userspace if not desired.
+	 *
+	 * However, if the existing start matches a pre-existing start, but the
+	 * end element doesn't match the corresponding pre-existing end element,
+	 * we need to report a partial overlap. This is a local condition that
+	 * can be noticed without need for a tracking flag, by checking for a
+	 * local duplicated end for a corresponding start, from left and right,
+	 * separately.
 	 */
 
 	parent = NULL;
@@ -272,26 +292,41 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 			if (nft_rbtree_interval_start(new)) {
 				if (nft_rbtree_interval_end(rbe) &&
 				    nft_set_elem_active(&rbe->ext, genmask) &&
-				    !nft_set_elem_expired(&rbe->ext))
+				    !nft_set_elem_expired(&rbe->ext) && !*p)
 					overlap = false;
 			} else {
+				if (dup_end_left && !*p)
+					return -ENOTEMPTY;
+
 				overlap = nft_rbtree_interval_end(rbe) &&
 					  nft_set_elem_active(&rbe->ext,
 							      genmask) &&
 					  !nft_set_elem_expired(&rbe->ext);
+
+				if (overlap) {
+					dup_end_right = true;
+					continue;
+				}
 			}
 		} else if (d > 0) {
 			p = &parent->rb_right;
 
 			if (nft_rbtree_interval_end(new)) {
+				if (dup_end_right && !*p)
+					return -ENOTEMPTY;
+
 				overlap = nft_rbtree_interval_end(rbe) &&
 					  nft_set_elem_active(&rbe->ext,
 							      genmask) &&
 					  !nft_set_elem_expired(&rbe->ext);
-			} else if (nft_rbtree_interval_end(rbe) &&
-				   nft_set_elem_active(&rbe->ext, genmask) &&
+
+				if (overlap) {
+					dup_end_left = true;
+					continue;
+				}
+			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
 				   !nft_set_elem_expired(&rbe->ext)) {
-				overlap = true;
+				overlap = nft_rbtree_interval_end(rbe);
 			}
 		} else {
 			if (nft_rbtree_interval_end(rbe) &&
@@ -316,6 +351,8 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 				p = &parent->rb_left;
 			}
 		}
+
+		dup_end_left = dup_end_right = false;
 	}
 
 	if (overlap)
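The net effect of the dup_end_left/dup_end_right tracking: an element pair that exactly matches an existing interval is reported as -EEXIST (and tolerated by the caller unless NLM_F_EXCL is set), while a pair that shares only one endpoint is a partial overlap and fails with -ENOTEMPTY. A self-contained model of that distinction; representing intervals as half-open pairs is an assumption of this sketch, not the set's actual element encoding:

#include <stdbool.h>
#include <stdio.h>

struct ival { unsigned int start, end; };	/* half-open: [start, end) */

static bool exact_match(struct ival a, struct ival b)
{
	return a.start == b.start && a.end == b.end;
}

/* Overlapping ranges that are not identical: the case the rbtree walk
 * must now reject with -ENOTEMPTY. */
static bool partial_overlap(struct ival a, struct ival b)
{
	return a.start < b.end && b.start < a.end && !exact_match(a, b);
}

int main(void)
{
	struct ival old = { 10, 20 }, dup = { 10, 20 }, part = { 10, 30 };

	printf("dup:  exact=%d partial=%d\n",
	       exact_match(old, dup), partial_overlap(old, dup));
	printf("part: exact=%d partial=%d\n",
	       exact_match(old, part), partial_overlap(old, part));
	return 0;
}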

View file

@@ -640,7 +640,7 @@ static void __net_exit recent_proc_net_exit(struct net *net)
 	struct recent_table *t;
 
 	/* recent_net_exit() is called before recent_mt_destroy(). Make sure
-	 * that the parent xt_recent proc entry is is empty before trying to
+	 * that the parent xt_recent proc entry is empty before trying to
 	 * remove it.
 	 */
 	spin_lock_bh(&recent_lock);

View file

@@ -85,6 +85,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
 			kfree(netlbl_domhsh_addr6_entry(iter6));
 		}
 #endif /* IPv6 */
+		kfree(ptr->def.addrsel);
 	}
 	kfree(ptr->domain);
 	kfree(ptr);
@@ -537,6 +538,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
 				goto add_return;
 		}
 #endif /* IPv6 */
+		/* cleanup the new entry since we've moved everything over */
+		netlbl_domhsh_free_entry(&entry->rcu);
 	} else
 		ret_val = -EINVAL;
@@ -580,6 +583,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
 {
 	int ret_val = 0;
 	struct audit_buffer *audit_buf;
+	struct netlbl_af4list *iter4;
+	struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct netlbl_af6list *iter6;
+	struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */
 
 	if (entry == NULL)
 		return -ENOENT;
@@ -597,6 +606,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
 		ret_val = -ENOENT;
 	spin_unlock(&netlbl_domhsh_lock);
 
+	if (ret_val)
+		return ret_val;
+
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
 	if (audit_buf != NULL) {
 		audit_log_format(audit_buf,
@@ -606,24 +618,14 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
 		audit_log_end(audit_buf);
 	}
 
-	if (ret_val == 0) {
-		struct netlbl_af4list *iter4;
-		struct netlbl_domaddr4_map *map4;
-#if IS_ENABLED(CONFIG_IPV6)
-		struct netlbl_af6list *iter6;
-		struct netlbl_domaddr6_map *map6;
-#endif /* IPv6 */
-
-		switch (entry->def.type) {
-		case NETLBL_NLTYPE_ADDRSELECT:
-			netlbl_af4list_foreach_rcu(iter4,
-						   &entry->def.addrsel->list4) {
-				map4 = netlbl_domhsh_addr4_entry(iter4);
-				cipso_v4_doi_putdef(map4->def.cipso);
-			}
+	switch (entry->def.type) {
+	case NETLBL_NLTYPE_ADDRSELECT:
+		netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
+			map4 = netlbl_domhsh_addr4_entry(iter4);
+			cipso_v4_doi_putdef(map4->def.cipso);
+		}
 #if IS_ENABLED(CONFIG_IPV6)
-			netlbl_af6list_foreach_rcu(iter6,
-						   &entry->def.addrsel->list6) {
-				map6 = netlbl_domhsh_addr6_entry(iter6);
-				calipso_doi_putdef(map6->def.calipso);
-			}
+		netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
+			map6 = netlbl_domhsh_addr6_entry(iter6);
+			calipso_doi_putdef(map6->def.calipso);
+		}
@@ -639,7 +641,6 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
 #endif /* IPv6 */
-		}
-		call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
-	}
+	}
+	call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
 
 	return ret_val;
 }
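The reordering above amounts to "do not drop references for an entry that was never unhashed". A toy model with hypothetical names and a plain counter in place of the kernel's DOI refcounting:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct doi { int refcount; };

static void doi_put(struct doi *doi)
{
	if (--doi->refcount == 0)
		printf("DOI definition freed\n");
}

/* Mirrors the reordered netlbl_domhsh_remove_entry(): bail out before
 * dropping references if the entry was not actually in the hash. */
static int remove_entry(struct doi *doi, bool hashed)
{
	if (!hashed)
		return -ENOENT;

	doi_put(doi);	/* safe: the unhashed entry owned this reference */
	return 0;
}

int main(void)
{
	struct doi cipso = { .refcount = 1 };

	printf("first:  %d\n", remove_entry(&cipso, true));
	printf("second: %d\n", remove_entry(&cipso, false)); /* no double put */
	return 0;
}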

View file

@@ -353,7 +353,7 @@ static void netlink_rcv_wake(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
-	if (skb_queue_empty(&sk->sk_receive_queue))
+	if (skb_queue_empty_lockless(&sk->sk_receive_queue))
 		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
 	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
 		wake_up_interruptible(&nlk->wait);

View file

@@ -488,7 +488,6 @@ enum rxrpc_call_flag {
 	RXRPC_CALL_RX_LAST,		/* Received the last packet (at rxtx_top) */
 	RXRPC_CALL_TX_LAST,		/* Last packet in Tx buffer (at rxtx_top) */
 	RXRPC_CALL_SEND_PING,		/* A ping will need to be sent */
-	RXRPC_CALL_PINGING,		/* Ping in process */
 	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
 	RXRPC_CALL_BEGAN_RX_TIMER,	/* We began the expect_rx_by timer */
 	RXRPC_CALL_RX_HEARD,		/* The peer responded at least once to this call */
@@ -673,9 +672,13 @@ struct rxrpc_call {
 	rxrpc_seq_t		ackr_consumed;	/* Highest packet shown consumed */
 	rxrpc_seq_t		ackr_seen;	/* Highest packet shown seen */
 
-	/* ping management */
-	rxrpc_serial_t		ping_serial;	/* Last ping sent */
-	ktime_t			ping_time;	/* Time last ping sent */
+	/* RTT management */
+	rxrpc_serial_t		rtt_serial[4];	/* Serial number of DATA or PING sent */
+	ktime_t			rtt_sent_at[4];	/* Time packet sent */
+	unsigned long		rtt_avail;	/* Mask of available slots in bits 0-3,
+						 * Mask of pending samples in 8-11 */
+#define RXRPC_CALL_RTT_AVAIL_MASK	0xf
+#define RXRPC_CALL_RTT_PEND_SHIFT	8
 
 	/* transmission-phase ACK management */
 	ktime_t			acks_latest_ts;	/* Timestamp of latest ACK received */
@@ -1037,7 +1040,7 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
 /*
  * rtt.c
  */
-void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
+void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
 			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
 unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
 void rxrpc_peer_init_rtt(struct rxrpc_peer *);
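A sketch of how the two halves of rtt_avail cooperate under the layout the patch defines (bits 0-3 mark free slots, bits 8-11 mark probes in flight); __builtin_ctzl plays the role of the kernel's __ffs, and the atomic bitops and memory barriers are elided:

#include <stdio.h>

#define RTT_AVAIL_MASK	0xfUL		/* like RXRPC_CALL_RTT_AVAIL_MASK */
#define RTT_PEND_SHIFT	8		/* like RXRPC_CALL_RTT_PEND_SHIFT */

static unsigned long rtt_avail = RTT_AVAIL_MASK;	/* all 4 slots free */

static int begin_probe(void)
{
	int slot;

	if (!(rtt_avail & RTT_AVAIL_MASK))
		return -1;				/* no slot free */
	slot = __builtin_ctzl(rtt_avail & RTT_AVAIL_MASK);
	rtt_avail &= ~(1UL << slot);			/* claim the slot */
	rtt_avail |= 1UL << (slot + RTT_PEND_SHIFT);	/* mark pending */
	return slot;
}

static void complete_probe(int slot)
{
	rtt_avail &= ~(1UL << (slot + RTT_PEND_SHIFT));	/* sample consumed */
	rtt_avail |= 1UL << slot;			/* slot free again */
}

int main(void)
{
	int a = begin_probe(), b = begin_probe();

	printf("slots %d,%d avail=%#lx\n", a, b, rtt_avail);
	complete_probe(a);
	printf("after complete avail=%#lx\n", rtt_avail);
	return 0;
}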

View file

@@ -153,6 +153,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
 	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
 
 	call->rxnet = rxnet;
+	call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
 	atomic_inc(&rxnet->nr_calls);
 	return call;

View file

@@ -608,36 +608,57 @@ unlock:
 }
 
 /*
- * Process a requested ACK.
+ * See if there's a cached RTT probe to complete.
  */
-static void rxrpc_input_requested_ack(struct rxrpc_call *call,
-				      ktime_t resp_time,
-				      rxrpc_serial_t orig_serial,
-				      rxrpc_serial_t ack_serial)
+static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
+				     ktime_t resp_time,
+				     rxrpc_serial_t acked_serial,
+				     rxrpc_serial_t ack_serial,
+				     enum rxrpc_rtt_rx_trace type)
 {
-	struct rxrpc_skb_priv *sp;
-	struct sk_buff *skb;
+	rxrpc_serial_t orig_serial;
+	unsigned long avail;
 	ktime_t sent_at;
-	int ix;
+	bool matched = false;
+	int i;
 
-	for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
-		skb = call->rxtx_buffer[ix];
-		if (!skb)
-			continue;
+	avail = READ_ONCE(call->rtt_avail);
+	smp_rmb(); /* Read avail bits before accessing data. */
 
-		sent_at = skb->tstamp;
-		smp_rmb(); /* Read timestamp before serial. */
-		sp = rxrpc_skb(skb);
-		if (sp->hdr.serial != orig_serial)
+	for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
+		if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail))
 			continue;
-		goto found;
-	}
 
-	return;
+		sent_at = call->rtt_sent_at[i];
+		orig_serial = call->rtt_serial[i];
+
+		if (orig_serial == acked_serial) {
+			clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+			smp_mb(); /* Read data before setting avail bit */
+			set_bit(i, &call->rtt_avail);
+			if (type != rxrpc_rtt_rx_cancel)
+				rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
+						   sent_at, resp_time);
+			else
+				trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
+						   orig_serial, acked_serial, 0, 0);
+			matched = true;
+		}
+
+		/* If a later serial is being acked, then mark this slot as
+		 * being available.
+		 */
+		if (after(acked_serial, orig_serial)) {
+			trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
+					   orig_serial, acked_serial, 0, 0);
+			clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+			smp_wmb();
+			set_bit(i, &call->rtt_avail);
+		}
+	}
 
-found:
-	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
-			   orig_serial, ack_serial, sent_at, resp_time);
+	if (!matched)
+		trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
 }
 
 /*
@@ -682,27 +703,11 @@ static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
  */
 static void rxrpc_input_ping_response(struct rxrpc_call *call,
 				      ktime_t resp_time,
-				      rxrpc_serial_t orig_serial,
+				      rxrpc_serial_t acked_serial,
 				      rxrpc_serial_t ack_serial)
 {
-	rxrpc_serial_t ping_serial;
-	ktime_t ping_time;
-
-	ping_time = call->ping_time;
-	smp_rmb();
-	ping_serial = READ_ONCE(call->ping_serial);
-
-	if (orig_serial == call->acks_lost_ping)
+	if (acked_serial == call->acks_lost_ping)
 		rxrpc_input_check_for_lost_ack(call);
-
-	if (before(orig_serial, ping_serial) ||
-	    !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
-		return;
-	if (after(orig_serial, ping_serial))
-		return;
-
-	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
-			   orig_serial, ack_serial, ping_time, resp_time);
 }
 
 /*
@@ -843,7 +848,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 		struct rxrpc_ackinfo info;
 		u8 acks[RXRPC_MAXACKS];
 	} buf;
-	rxrpc_serial_t acked_serial;
+	rxrpc_serial_t ack_serial, acked_serial;
 	rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
 	int nr_acks, offset, ioffset;
 
@@ -856,6 +861,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 	}
 	offset += sizeof(buf.ack);
 
+	ack_serial = sp->hdr.serial;
 	acked_serial = ntohl(buf.ack.serial);
 	first_soft_ack = ntohl(buf.ack.firstPacket);
 	prev_pkt = ntohl(buf.ack.previousPacket);
@@ -864,31 +870,42 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 	summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
 			      buf.ack.reason : RXRPC_ACK__INVALID);
 
-	trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
+	trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
 			   first_soft_ack, prev_pkt,
 			   summary.ack_reason, nr_acks);
 
-	if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
+	switch (buf.ack.reason) {
+	case RXRPC_ACK_PING_RESPONSE:
 		rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
-					  sp->hdr.serial);
-	if (buf.ack.reason == RXRPC_ACK_REQUESTED)
-		rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
-					  sp->hdr.serial);
+					  ack_serial);
+		rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+					 rxrpc_rtt_rx_ping_response);
+		break;
+	case RXRPC_ACK_REQUESTED:
+		rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+					 rxrpc_rtt_rx_requested_ack);
+		break;
+	default:
+		if (acked_serial != 0)
+			rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
						 rxrpc_rtt_rx_cancel);
+		break;
+	}
 
 	if (buf.ack.reason == RXRPC_ACK_PING) {
-		_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
+		_proto("Rx ACK %%%u PING Request", ack_serial);
 		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
-				  sp->hdr.serial, true, true,
+				  ack_serial, true, true,
 				  rxrpc_propose_ack_respond_to_ping);
 	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
 		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
-				  sp->hdr.serial, true, true,
+				  ack_serial, true, true,
 				  rxrpc_propose_ack_respond_to_ack);
 	}
 
 	/* Discard any out-of-order or duplicate ACKs (outside lock). */
 	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
-		trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+		trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
 					   first_soft_ack, call->ackr_first_seq,
 					   prev_pkt, call->ackr_prev_seq);
 		return;
@@ -904,7 +921,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 
 	/* Discard any out-of-order or duplicate ACKs (inside lock). */
 	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
-		trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+		trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
 					   first_soft_ack, call->ackr_first_seq,
 					   prev_pkt, call->ackr_prev_seq);
 		goto out;
@@ -964,7 +981,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 	    RXRPC_TX_ANNO_LAST &&
 	    summary.nr_acks == call->tx_top - hard_ack &&
 	    rxrpc_is_client_call(call))
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial,
 				  false, true,
 				  rxrpc_propose_ack_ping_for_lost_reply);

View file

@@ -123,6 +123,49 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
 	return top - hard_ack + 3;
 }
 
+/*
+ * Record the beginning of an RTT probe.
+ */
+static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
+				 enum rxrpc_rtt_tx_trace why)
+{
+	unsigned long avail = call->rtt_avail;
+	int rtt_slot = 9;
+
+	if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
+		goto no_slot;
+
+	rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
+	if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
+		goto no_slot;
+
+	call->rtt_serial[rtt_slot] = serial;
+	call->rtt_sent_at[rtt_slot] = ktime_get_real();
+	smp_wmb(); /* Write data before avail bit */
+	set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+
+	trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
+	return rtt_slot;
+
+no_slot:
+	trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
+	return -1;
+}
+
+/*
+ * Cancel an RTT probe.
+ */
+static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
+				   rxrpc_serial_t serial, int rtt_slot)
+{
+	if (rtt_slot != -1) {
+		clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+		smp_wmb(); /* Clear pending bit before setting slot */
+		set_bit(rtt_slot, &call->rtt_avail);
+		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
+	}
+}
+
 /*
  * Send an ACK call packet.
  */
@@ -136,7 +179,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 	rxrpc_serial_t serial;
 	rxrpc_seq_t hard_ack, top;
 	size_t len, n;
-	int ret;
+	int ret, rtt_slot = -1;
 	u8 reason;
 
 	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
@@ -196,18 +239,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 	if (_serial)
 		*_serial = serial;
 
-	if (ping) {
-		call->ping_serial = serial;
-		smp_wmb();
-		/* We need to stick a time in before we send the packet in case
-		 * the reply gets back before kernel_sendmsg() completes - but
-		 * asking UDP to send the packet can take a relatively long
-		 * time.
-		 */
-		call->ping_time = ktime_get_real();
-		set_bit(RXRPC_CALL_PINGING, &call->flags);
-		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
-	}
+	if (ping)
+		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
 
 	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
 	conn->params.peer->last_tx_at = ktime_get_seconds();
@@ -221,8 +254,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 
 	if (call->state < RXRPC_CALL_COMPLETE) {
 		if (ret < 0) {
-			if (ping)
-				clear_bit(RXRPC_CALL_PINGING, &call->flags);
+			rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
 			rxrpc_propose_ACK(call, pkt->ack.reason,
 					  ntohl(pkt->ack.serial),
 					  false, true,
@@ -321,7 +353,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 	struct kvec iov[2];
 	rxrpc_serial_t serial;
 	size_t len;
-	int ret;
+	int ret, rtt_slot = -1;
 
 	_enter(",{%d}", skb->len);
 
@@ -397,6 +429,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 	sp->hdr.serial = serial;
 	smp_wmb(); /* Set serial before timestamp */
 	skb->tstamp = ktime_get_real();
+	if (whdr.flags & RXRPC_REQUEST_ACK)
+		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
 
 	/* send the packet by UDP
 	 * - returns -EMSGSIZE if UDP would have to fragment the packet
@@ -408,12 +442,15 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 	conn->params.peer->last_tx_at = ktime_get_seconds();
 
 	up_read(&conn->params.local->defrag_sem);
-	if (ret < 0)
+	if (ret < 0) {
+		rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
 				    rxrpc_tx_point_call_data_nofrag);
-	else
+	} else {
 		trace_rxrpc_tx_packet(call->debug_id, &whdr,
 				      rxrpc_tx_point_call_data_nofrag);
+	}
+
 	rxrpc_tx_backoff(call, ret);
 	if (ret == -EMSGSIZE)
 		goto send_fragmentable;
@@ -422,7 +459,6 @@ done:
 	if (ret >= 0) {
 		if (whdr.flags & RXRPC_REQUEST_ACK) {
 			call->peer->rtt_last_req = skb->tstamp;
-			trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
 			if (call->peer->rtt_count > 1) {
 				unsigned long nowj = jiffies, ack_lost_at;
 
@@ -469,6 +505,8 @@ send_fragmentable:
 	sp->hdr.serial = serial;
 	smp_wmb(); /* Set serial before timestamp */
 	skb->tstamp = ktime_get_real();
+	if (whdr.flags & RXRPC_REQUEST_ACK)
+		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
 
 	switch (conn->params.local->srx.transport.family) {
 	case AF_INET6:
@@ -487,12 +525,14 @@ send_fragmentable:
 		BUG();
 	}
 
-	if (ret < 0)
+	if (ret < 0) {
+		rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
 				    rxrpc_tx_point_call_data_frag);
-	else
+	} else {
 		trace_rxrpc_tx_packet(call->debug_id, &whdr,
 				      rxrpc_tx_point_call_data_frag);
+	}
 	rxrpc_tx_backoff(call, ret);
 
 	up_write(&conn->params.local->defrag_sem);

View file

@@ -502,11 +502,21 @@ EXPORT_SYMBOL(rxrpc_kernel_get_peer);
  * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
  * @sock: The socket on which the call is in progress.
  * @call: The call to query
+ * @_srtt: Where to store the SRTT value.
  *
- * Get the call's peer smoothed RTT.
+ * Get the call's peer smoothed RTT in uS.
  */
-u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call)
+bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call,
+			   u32 *_srtt)
 {
-	return call->peer->srtt_us >> 3;
+	struct rxrpc_peer *peer = call->peer;
+
+	if (peer->rtt_count == 0) {
+		*_srtt = 1000000; /* 1S */
+		return false;
+	}
+
+	*_srtt = call->peer->srtt_us >> 3;
+	return true;
 }
 EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
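Caller-side view of the new API: the bool result lets a user of rxrpc_kernel_get_srtt() fall back to a default timeout when no RTT sample exists yet. A minimal sketch with plain structs standing in for the rxrpc objects:

#include <stdbool.h>
#include <stdio.h>

struct peer { unsigned int rtt_count; unsigned int srtt_us; };

/* Mirrors the new rxrpc_kernel_get_srtt(): report whether a real
 * sample backs the value, and hand out a 1s default otherwise. */
static bool get_srtt(const struct peer *peer, unsigned int *srtt)
{
	if (peer->rtt_count == 0) {
		*srtt = 1000000;	/* 1s default */
		return false;
	}
	*srtt = peer->srtt_us >> 3;	/* srtt is stored left-shifted by 3 */
	return true;
}

int main(void)
{
	struct peer fresh = { 0, 0 }, warm = { 5, 8000 };
	unsigned int srtt;
	bool valid;

	valid = get_srtt(&fresh, &srtt);
	printf("fresh: valid=%d srtt=%u\n", valid, srtt);
	valid = get_srtt(&warm, &srtt);
	printf("warm:  valid=%d srtt=%u\n", valid, srtt);
	return 0;
}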

View file

@@ -146,6 +146,7 @@ static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
  * exclusive access to the peer RTT data.
  */
 void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+			int rtt_slot,
 			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
 			ktime_t send_time, ktime_t resp_time)
 {
@@ -162,7 +163,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 	peer->rtt_count++;
 	spin_unlock(&peer->rtt_input_lock);
 
-	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial,
+	trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
 			   peer->srtt_us >> 3, peer->rto_j);
 }

View file

@@ -1137,7 +1137,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
 	ret = -ENOMEM;
 	ticket = kmalloc(ticket_len, GFP_NOFS);
 	if (!ticket)
-		goto temporary_error;
+		goto temporary_error_free_resp;
 
 	eproto = tracepoint_string("rxkad_tkt_short");
 	abort_code = RXKADPACKETSHORT;
@@ -1230,6 +1230,7 @@ protocol_error:
 
 temporary_error_free_ticket:
 	kfree(ticket);
+temporary_error_free_resp:
 	kfree(response);
 temporary_error:
 	/* Ignore the response packet if we got a temporary error such as

View file

@@ -353,23 +353,11 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt,
 			    FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 			    tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 	if (err)
-		goto err_early_drop_init;
+		return err;
 
-	err = tcf_qevent_init(&q->qe_mark, sch,
-			      FLOW_BLOCK_BINDER_TYPE_RED_MARK,
-			      tb[TCA_RED_MARK_BLOCK], extack);
-	if (err)
-		goto err_mark_init;
-
-	return 0;
-
-err_mark_init:
-	tcf_qevent_destroy(&q->qe_early_drop, sch);
-err_early_drop_init:
-	del_timer_sync(&q->adapt_timer);
-	red_offload(sch, false);
-	qdisc_put(q->qdisc);
-	return err;
+	return tcf_qevent_init(&q->qe_mark, sch,
+			       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
+			       tb[TCA_RED_MARK_BLOCK], extack);
 }
 
 static int red_change(struct Qdisc *sch, struct nlattr *opt,
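The simplification relies on the qdisc contract that the core calls ->destroy() even when ->init() fails, so an init error path that also tears down the timer, offload and child qdisc ends up cleaning the same state twice. A toy model of that contract, with hypothetical names:

#include <stdio.h>

struct qdisc { int timer_armed; };

static void q_destroy(struct qdisc *q)
{
	if (q->timer_armed) {
		q->timer_armed = 0;
		printf("timer disarmed\n");
	}
}

static int q_init(struct qdisc *q, int fail)
{
	q->timer_armed = 1;
	if (fail)
		return -1;	/* leave all cleanup to q_destroy() */
	return 0;
}

int main(void)
{
	struct qdisc q = { 0 };

	if (q_init(&q, 1) < 0)
		q_destroy(&q);	/* single, idempotent teardown path */
	return 0;
}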

View file

@@ -1176,9 +1176,27 @@ static void taprio_offload_config_changed(struct taprio_sched *q)
 	spin_unlock(&q->current_entry_lock);
 }
 
-static void taprio_sched_to_offload(struct taprio_sched *q,
+static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
+{
+	u32 i, queue_mask = 0;
+
+	for (i = 0; i < dev->num_tc; i++) {
+		u32 offset, count;
+
+		if (!(tc_mask & BIT(i)))
+			continue;
+
+		offset = dev->tc_to_txq[i].offset;
+		count = dev->tc_to_txq[i].count;
+
+		queue_mask |= GENMASK(offset + count - 1, offset);
+	}
+
+	return queue_mask;
+}
+
+static void taprio_sched_to_offload(struct net_device *dev,
 				    struct sched_gate_list *sched,
-				    const struct tc_mqprio_qopt *mqprio,
 				    struct tc_taprio_qopt_offload *offload)
 {
 	struct sched_entry *entry;
@@ -1193,7 +1211,8 @@ static void taprio_sched_to_offload(struct taprio_sched *q,
 		e->command = entry->command;
 		e->interval = entry->interval;
-		e->gate_mask = entry->gate_mask;
+		e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);
+
 		i++;
 	}
@@ -1201,7 +1220,6 @@ static void taprio_sched_to_offload(struct taprio_sched *q,
 }
 
 static int taprio_enable_offload(struct net_device *dev,
-				 struct tc_mqprio_qopt *mqprio,
 				 struct taprio_sched *q,
 				 struct sched_gate_list *sched,
 				 struct netlink_ext_ack *extack)
@@ -1223,7 +1241,7 @@ static int taprio_enable_offload(struct net_device *dev,
 		return -ENOMEM;
 	}
 	offload->enable = 1;
-	taprio_sched_to_offload(q, sched, mqprio, offload);
+	taprio_sched_to_offload(dev, sched, offload);
 
 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
 	if (err < 0) {
@@ -1485,7 +1503,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	}
 
 	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
-		err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
+		err = taprio_enable_offload(dev, q, new_admin, extack);
 	else
 		err = taprio_disable_offload(dev, q, extack);
 	if (err)
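The central conversion above turns a traffic-class gate mask into a TX-queue mask via each class's tc_to_txq offset/count range. A standalone rework with hypothetical numbers, where tc0 owns queues 0-1 and tc1 owns queues 2-3, and a plain-C equivalent of the kernel's GENMASK():

#include <stdio.h>

#define GENMASK(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))

struct txq_range { unsigned int offset, count; };

static unsigned int tc_map_to_queue_mask(const struct txq_range *tc_to_txq,
					 unsigned int num_tc,
					 unsigned int tc_mask)
{
	unsigned int i, queue_mask = 0;

	for (i = 0; i < num_tc; i++) {
		unsigned int offset, count;

		if (!(tc_mask & (1U << i)))
			continue;

		offset = tc_to_txq[i].offset;
		count = tc_to_txq[i].count;
		queue_mask |= GENMASK(offset + count - 1, offset);
	}
	return queue_mask;
}

int main(void)
{
	struct txq_range map[2] = { { 0, 2 }, { 2, 2 } };

	/* gate mask 0x2 opens tc1 only -> queues 2 and 3 -> 0xc */
	printf("queue mask = %#x\n", tc_map_to_queue_mask(map, 2, 0x2));
	return 0;
}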

View file

@@ -8060,8 +8060,6 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 	pr_debug("%s: begins, snum:%d\n", __func__, snum);
 
-	local_bh_disable();
-
 	if (snum == 0) {
 		/* Search for an available port. */
 		int low, high, remaining, index;
@@ -8079,20 +8077,21 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 				continue;
 			index = sctp_phashfn(net, rover);
 			head = &sctp_port_hashtable[index];
-			spin_lock(&head->lock);
+			spin_lock_bh(&head->lock);
 			sctp_for_each_hentry(pp, &head->chain)
 				if ((pp->port == rover) &&
 				    net_eq(net, pp->net))
 					goto next;
 			break;
 		next:
-			spin_unlock(&head->lock);
+			spin_unlock_bh(&head->lock);
+			cond_resched();
 		} while (--remaining > 0);
 
 		/* Exhausted local port range during search? */
 		ret = 1;
 		if (remaining <= 0)
-			goto fail;
+			return ret;
 
 		/* OK, here is the one we will use.  HEAD (the port
 		 * hash table list entry) is non-NULL and we hold it's
@@ -8107,7 +8106,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 	 * port iterator, pp being NULL.
 	 */
 	head = &sctp_port_hashtable[sctp_phashfn(net, snum)];
-	spin_lock(&head->lock);
+	spin_lock_bh(&head->lock);
 	sctp_for_each_hentry(pp, &head->chain) {
 		if ((pp->port == snum) && net_eq(pp->net, net))
 			goto pp_found;
@@ -8207,10 +8206,7 @@ success:
 	ret = 0;
 
 fail_unlock:
-	spin_unlock(&head->lock);
-
-fail:
-	local_bh_enable();
+	spin_unlock_bh(&head->lock);
+
 	return ret;
 }
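The locking change scopes bottom-half disabling to a single bucket and yields between buckets, rather than disabling BHs across the whole port search. A userspace analogue of that loop shape; the mutex stands in for the bucket spinlock and sched_yield() for cond_resched():

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

/* The lock is scoped to one bucket, and the scanner yields between
 * buckets, so a long search can no longer monopolize the CPU the way
 * a single local_bh_disable() around the whole loop did. */
static void scan_buckets(int nbuckets)
{
	int i;

	for (i = 0; i < nbuckets; i++) {
		pthread_mutex_lock(&bucket_lock);	/* spin_lock_bh() */
		/* ... check this bucket's chain for a collision ... */
		pthread_mutex_unlock(&bucket_lock);	/* spin_unlock_bh() */
		sched_yield();				/* cond_resched() */
	}
}

int main(void)
{
	scan_buckets(64);
	printf("scan done\n");
	return 0;
}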

View file

@@ -116,7 +116,6 @@ static void smc_close_cancel_work(struct smc_sock *smc)
 	cancel_work_sync(&smc->conn.close_work);
 	cancel_delayed_work_sync(&smc->conn.tx_work);
 	lock_sock(sk);
-	sk->sk_state = SMC_CLOSED;
 }
 
 /* terminate smc socket abnormally - active abort
@@ -134,22 +133,22 @@ void smc_close_active_abort(struct smc_sock *smc)
 	}
 
 	switch (sk->sk_state) {
 	case SMC_ACTIVE:
-		sk->sk_state = SMC_PEERABORTWAIT;
-		smc_close_cancel_work(smc);
-		sk->sk_state = SMC_CLOSED;
-		sock_put(sk); /* passive closing */
-		break;
 	case SMC_APPCLOSEWAIT1:
 	case SMC_APPCLOSEWAIT2:
+		sk->sk_state = SMC_PEERABORTWAIT;
 		smc_close_cancel_work(smc);
+		if (sk->sk_state != SMC_PEERABORTWAIT)
+			break;
 		sk->sk_state = SMC_CLOSED;
-		sock_put(sk); /* postponed passive closing */
+		sock_put(sk); /* (postponed) passive closing */
 		break;
 	case SMC_PEERCLOSEWAIT1:
 	case SMC_PEERCLOSEWAIT2:
 	case SMC_PEERFINCLOSEWAIT:
 		sk->sk_state = SMC_PEERABORTWAIT;
 		smc_close_cancel_work(smc);
+		if (sk->sk_state != SMC_PEERABORTWAIT)
+			break;
 		sk->sk_state = SMC_CLOSED;
 		smc_conn_free(&smc->conn);
 		release_clcsock = true;
@@ -159,6 +158,8 @@ void smc_close_active_abort(struct smc_sock *smc)
 	case SMC_APPFINCLOSEWAIT:
 		sk->sk_state = SMC_PEERABORTWAIT;
 		smc_close_cancel_work(smc);
+		if (sk->sk_state != SMC_PEERABORTWAIT)
+			break;
 		sk->sk_state = SMC_CLOSED;
 		smc_conn_free(&smc->conn);
 		release_clcsock = true;
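The repeated state re-check works because smc_close_cancel_work() drops and re-takes the sock lock, so another closing path may have completed in between; only the path that still observes SMC_PEERABORTWAIT may perform the final transition and the matching sock_put(). A single-threaded sketch of that recheck-after-unlock pattern:

#include <stdio.h>

enum state { ACTIVE, PEERABORTWAIT, CLOSED };

static enum state sk_state = ACTIVE;

static void cancel_work(int racing_close)
{
	/* the lock is dropped in here; another path may close the sock */
	if (racing_close)
		sk_state = CLOSED;
}

static void active_abort(int racing_close)
{
	sk_state = PEERABORTWAIT;
	cancel_work(racing_close);
	if (sk_state != PEERABORTWAIT)
		return;		/* someone else finished the close */
	sk_state = CLOSED;
	printf("we did the final close\n");
}

int main(void)
{
	active_abort(1);	/* racing close wins: no double teardown */
	sk_state = ACTIVE;
	active_abort(0);	/* we win: prints once */
	return 0;
}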

View file

@@ -1356,6 +1356,8 @@ create:
 	if (ini->is_smcd) {
 		conn->rx_off = sizeof(struct smcd_cdc_msg);
 		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
+	} else {
+		conn->rx_off = 0;
 	}
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&conn->acurs_lock);
@@ -1777,6 +1779,7 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd)
 		list_del(&smc->conn.sndbuf_desc->list);
 		mutex_unlock(&smc->conn.lgr->sndbufs_lock);
 		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
+		smc->conn.sndbuf_desc = NULL;
 	}
 	return rc;
 }

View file

@@ -841,6 +841,9 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
 	struct smc_init_info ini;
 	int lnk_idx, rc = 0;
 
+	if (!llc->qp_mtu)
+		goto out_reject;
+
 	ini.vlan_id = lgr->vlan_id;
 	smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
 	if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
@@ -917,10 +920,20 @@ out:
 	kfree(qentry);
 }
 
+static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
+		if (llc->raw.data[i])
+			return false;
+	return true;
+}
+
 static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
 {
 	if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
-	    !llc->add_link.qp_mtu && !llc->add_link.link_num)
+	    smc_llc_is_empty_llc_message(llc))
 		return true;
 	return false;
 }
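The stricter test treats an ADD_LINK as a locally enqueued placeholder only if every payload byte is zero, instead of probing two fields that a real peer message could legitimately carry as zero. The core check, as a standalone sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Same predicate as smc_llc_is_empty_llc_message(), over a plain buffer. */
static bool is_all_zero(const unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (buf[i])
			return false;
	return true;
}

int main(void)
{
	unsigned char fake[40] = { 0 };		/* locally enqueued placeholder */
	unsigned char real[40] = { 0 };

	real[20] = 0x12;			/* e.g. a peer GID byte */
	printf("fake=%d real=%d\n",
	       is_all_zero(fake, sizeof(fake)),
	       is_all_zero(real, sizeof(real)));
	return 0;
}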

Some files were not shown because too many files have changed in this diff.