Merge tag 'net-6.1-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from rxrpc, netfilter and xfrm.

  Current release - regressions:

   - dccp/tcp: fix bhash2 issues related to WARN_ON() in
     inet_csk_get_port()

   - l2tp: don't sleep and disable BH under writer-side sk_callback_lock

   - eth: ice: fix handling of burst tx timestamps

  Current release - new code bugs:

   - xfrm: squelch kernel warning in case XFRM encap type is not
     available

   - eth: mlx5e: fix possible race condition in macsec extended packet
     number update routine

  Previous releases - regressions:

   - neigh: decrement the family specific qlen

   - netfilter: fix ipset regression

   - rxrpc: fix race between conn bundle lookup and bundle removal
     [ZDI-CAN-15975]

   - eth: iavf: do not restart tx queues after reset task failure

   - eth: nfp: add port from netdev validation for EEPROM access

   - eth: mtk_eth_soc: fix potential memory leak in mtk_rx_alloc()

  Previous releases - always broken:

   - tipc: set con sock in tipc_conn_alloc

   - nfc:
      - fix potential memory leaks
      - fix incorrect sizing calculations in EVT_TRANSACTION

   - eth: octeontx2-af: fix pci device refcount leak

   - eth: bonding: fix ICMPv6 header handling when receiving IPv6
     messages

   - eth: prestera: add missing unregister_netdev() in
     prestera_port_create()

   - eth: tsnep: fix rotten packets

  Misc:

   - usb: qmi_wwan: add support for LARA-L6"

* tag 'net-6.1-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (95 commits)
  net: thunderx: Fix the ACPI memory leak
  octeontx2-af: Fix reference count issue in rvu_sdp_init()
  net: altera_tse: release phylink resources in tse_shutdown()
  virtio_net: Fix probe failed when modprobe virtio_net
  net: wwan: t7xx: Fix the ACPI memory leak
  octeontx2-pf: Add check for devm_kcalloc
  net: enetc: preserve TX ring priority across reconfiguration
  net: marvell: prestera: add missing unregister_netdev() in prestera_port_create()
  nfc: st-nci: fix incorrect sizing calculations in EVT_TRANSACTION
  nfc: st-nci: fix memory leaks in EVT_TRANSACTION
  nfc: st-nci: fix incorrect validating logic in EVT_TRANSACTION
  Documentation: networking: Update generic_netlink_howto URL
  net/cdc_ncm: Fix multicast RX support for CDC NCM devices with ZLP
  net: usb: qmi_wwan: add u-blox 0x1342 composition
  l2tp: Don't sleep and disable BH under writer-side sk_callback_lock
  net: dm9051: Fix missing dev_kfree_skb() in dm9051_loop_rx()
  arcnet: fix potential memory leak in com20020_probe()
  ipv4: Fix error return code in fib_table_insert()
  net: ethernet: mtk_eth_soc: fix memory leak in error path
  net: ethernet: mtk_eth_soc: fix resource leak in error path
  ...
Committed by Linus Torvalds, 2022-11-24 11:19:20 -08:00, in commit 08ad43d554.
119 changed files with 921 additions and 487 deletions.


@ -6,4 +6,4 @@ Generic Netlink
A wiki document on how to use Generic Netlink can be found here:
* http://www.linuxfoundation.org/collaborate/workgroups/networking/generic_netlink_howto
* https://wiki.linuxfoundation.org/networking/generic_netlink_howto


@ -113,6 +113,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
struct com20020_dev *info;
struct net_device *dev;
struct arcnet_local *lp;
int ret = -ENOMEM;
dev_dbg(&p_dev->dev, "com20020_attach()\n");
@ -142,12 +143,18 @@ static int com20020_probe(struct pcmcia_device *p_dev)
info->dev = dev;
p_dev->priv = info;
return com20020_config(p_dev);
ret = com20020_config(p_dev);
if (ret)
goto fail_config;
return 0;
fail_config:
free_arcdev(dev);
fail_alloc_dev:
kfree(info);
fail_alloc_info:
return -ENOMEM;
return ret;
} /* com20020_attach */
static void com20020_detach(struct pcmcia_device *link)


@ -3231,16 +3231,23 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct slave *curr_active_slave, *curr_arp_slave;
struct icmp6hdr *hdr = icmp6_hdr(skb);
struct in6_addr *saddr, *daddr;
struct {
struct ipv6hdr ip6;
struct icmp6hdr icmp6;
} *combined, _combined;
if (skb->pkt_type == PACKET_OTHERHOST ||
skb->pkt_type == PACKET_LOOPBACK ||
hdr->icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
skb->pkt_type == PACKET_LOOPBACK)
goto out;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
goto out;
saddr = &combined->ip6.saddr;
daddr = &combined->ip6.daddr;
slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
__func__, slave->dev->name, bond_slave_state(slave),
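
The fix above stops trusting icmp6_hdr()/ipv6_hdr() on a possibly non-linear skb and instead pulls both headers through skb_header_pointer(), which bounds-checks the request and copies into a stack buffer when needed. A minimal userspace analogue of that pattern, with all names invented for illustration:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Userspace analogue of skb_header_pointer(): hand back `len` bytes at
 * `offset`, copied into `buf`, or NULL if the packet is too short
 * (assumes offset + len cannot overflow for the sizes involved). */
static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
                                  size_t offset, size_t len, void *buf)
{
        if (offset + len > pkt_len)
                return NULL;
        memcpy(buf, pkt + offset, len);
        return buf;
}

/* Combined view of both headers, mirroring the struct in the fix. */
struct combined_hdrs {
        uint8_t ip6[40];     /* fixed-size IPv6 header */
        uint8_t icmp6_type;  /* start of the ICMPv6 header */
        uint8_t icmp6_code;
};

int main(void)
{
        uint8_t pkt[41] = { 0 };  /* one byte short of both headers */
        struct combined_hdrs hdrs;

        if (!header_pointer(pkt, sizeof(pkt), 0, sizeof(hdrs), &hdrs))
                puts("truncated packet rejected before any field is read");
        return 0;
}

The key property is that a truncated packet is rejected before any field of the combined header is read.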


@ -256,6 +256,9 @@ static int sja1105_base_tx_mdio_read(struct mii_bus *bus, int phy, int reg)
u32 tmp;
int rc;
if (reg & MII_ADDR_C45)
return -EOPNOTSUPP;
rc = sja1105_xfer_u32(priv, SPI_READ, regs->mdio_100base_tx + reg,
&tmp, NULL);
if (rc < 0)
@ -272,6 +275,9 @@ static int sja1105_base_tx_mdio_write(struct mii_bus *bus, int phy, int reg,
const struct sja1105_regs *regs = priv->info->regs;
u32 tmp = val;
if (reg & MII_ADDR_C45)
return -EOPNOTSUPP;
return sja1105_xfer_u32(priv, SPI_WRITE, regs->mdio_100base_tx + reg,
&tmp, NULL);
}


@ -990,6 +990,7 @@ static int tse_shutdown(struct net_device *dev)
int ret;
phylink_stop(priv->phylink);
phylink_disconnect_phy(priv->phylink);
netif_stop_queue(dev);
napi_disable(&priv->napi);


@ -795,16 +795,20 @@ static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
struct pci_dev *dev;
struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
struct pci_dev *dev;
bool pending;
if (!vf)
return false;
dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
if (dev)
return bnx2x_is_pcie_pending(dev);
return false;
if (!dev)
return false;
pending = bnx2x_is_pcie_pending(dev);
pci_dev_put(dev);
return pending;
}
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)


@ -1794,7 +1794,7 @@ static int liquidio_open(struct net_device *netdev)
ifstate_set(lio, LIO_IFSTATE_RUNNING);
if (!OCTEON_CN23XX_PF(oct) || (OCTEON_CN23XX_PF(oct) && !oct->msix_on)) {
if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
ret = setup_tx_poll_fn(netdev);
if (ret)
goto err_poll;
@ -1824,7 +1824,7 @@ static int liquidio_open(struct net_device *netdev)
return 0;
err_rx_ctrl:
if (!OCTEON_CN23XX_PF(oct) || (OCTEON_CN23XX_PF(oct) && !oct->msix_on))
if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
cleanup_tx_poll_fn(netdev);
err_poll:
if (lio->ptp_clock) {


@ -1436,8 +1436,10 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
return AE_OK;
}
if (strncmp(string.pointer, bgx_sel, 4))
if (strncmp(string.pointer, bgx_sel, 4)) {
kfree(string.pointer);
return AE_OK;
}
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
bgx_acpi_register_phy, NULL, bgx, NULL);


@ -798,8 +798,10 @@ static int dm9051_loop_rx(struct board_info *db)
}
ret = dm9051_stop_mrcmd(db);
if (ret)
if (ret) {
dev_kfree_skb(skb);
return ret;
}
skb->protocol = eth_type_trans(skb, db->ndev);
if (db->ndev->features & NETIF_F_RXCSUM)
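
This hunk, like the nfcmrvl, nxp-nci, s3fwrn5, ql3xxx and ef100 changes later in the series, restores one ownership rule: when a function takes an skb and then bails out early, it still owns the buffer and must free it. A reduced sketch of that rule (stub types, not the kernel API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct skb { char data[64]; };

static void kfree_skb_stub(struct skb *skb) { free(skb); }

/* Once this function is handed the skb, every return path must either
 * pass the buffer onward or free it. `hw_ok` stands in for the various
 * failure checks in the real drivers. */
static int consume_or_free(struct skb *skb, int hw_ok)
{
        if (!hw_ok) {
                kfree_skb_stub(skb);  /* without this, each error leaks one skb */
                return -EIO;
        }
        kfree_skb_stub(skb);          /* stand-in for the normal consumer */
        return 0;
}

int main(void)
{
        consume_or_free(malloc(sizeof(struct skb)), 0);  /* error path */
        consume_or_free(malloc(sizeof(struct skb)), 1);  /* normal path */
        puts("both paths released the buffer");
        return 0;
}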


@ -542,6 +542,27 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
return (budget != 0);
}
static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
unsigned long flags;
struct tsnep_tx_entry *entry;
bool pending = false;
spin_lock_irqsave(&tx->lock, flags);
if (tx->read != tx->write) {
entry = &tx->entry[tx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_TX_DESC_OWNER_MASK) ==
(entry->properties & TSNEP_TX_DESC_OWNER_MASK))
pending = true;
}
spin_unlock_irqrestore(&tx->lock, flags);
return pending;
}
static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
int queue_index, struct tsnep_tx *tx)
{
@ -821,6 +842,19 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
return done;
}
static bool tsnep_rx_pending(struct tsnep_rx *rx)
{
struct tsnep_rx_entry *entry;
entry = &rx->entry[rx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_OWNER_COUNTER_MASK) ==
(entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
return true;
return false;
}
static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
int queue_index, struct tsnep_rx *rx)
{
@ -866,6 +900,17 @@ static void tsnep_rx_close(struct tsnep_rx *rx)
tsnep_rx_ring_cleanup(rx);
}
static bool tsnep_pending(struct tsnep_queue *queue)
{
if (queue->tx && tsnep_tx_pending(queue->tx))
return true;
if (queue->rx && tsnep_rx_pending(queue->rx))
return true;
return false;
}
static int tsnep_poll(struct napi_struct *napi, int budget)
{
struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
@ -886,9 +931,19 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
if (!complete)
return budget;
if (likely(napi_complete_done(napi, done)))
if (likely(napi_complete_done(napi, done))) {
tsnep_enable_irq(queue->adapter, queue->irq_mask);
/* reschedule if work is already pending, prevent rotten packets
* which are transmitted or received after polling but before
* interrupt enable
*/
if (tsnep_pending(queue)) {
tsnep_disable_irq(queue->adapter, queue->irq_mask);
napi_schedule(napi);
}
}
return min(done, budget - 1);
}
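
The comment in the hunk above names the race: a packet that completes after the final poll but before the interrupt is unmasked would never raise another interrupt. The fix re-checks for pending work after unmasking and reschedules if anything slipped through. A self-contained sketch of that discipline using C11 atomics (names are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool irq_unmasked;
static atomic_int  pending_work;

static void irq_unmask(void)   { atomic_store(&irq_unmasked, true); }
static void irq_mask(void)     { atomic_store(&irq_unmasked, false); }
static bool work_pending(void) { return atomic_load(&pending_work) > 0; }

/* End-of-poll sequence mirroring the tsnep_poll() fix: unmask first,
 * then re-check for work that completed while the line was masked;
 * if any is found, mask again and schedule another poll round. */
static void poll_complete(void)
{
        irq_unmask();
        if (work_pending()) {
                irq_mask();
                puts("late work found, rescheduling poll"); /* napi_schedule() */
        }
}

int main(void)
{
        atomic_store(&pending_work, 1);  /* simulate a "rotten" packet */
        poll_complete();
        return 0;
}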


@ -2058,7 +2058,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
/* enable Tx ints by setting pkt thr to 1 */
enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
tbmr = ENETC_TBMR_EN;
tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio);
if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
tbmr |= ENETC_TBMR_VIH;
@ -2461,7 +2461,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
/* Reset all ring priorities to 0 */
for (i = 0; i < priv->num_tx_rings; i++) {
tx_ring = priv->tx_ring[i];
enetc_set_bdr_prio(hw, tx_ring->index, 0);
tx_ring->prio = 0;
enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
}
return 0;
@ -2480,7 +2481,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
*/
for (i = 0; i < num_tc; i++) {
tx_ring = priv->tx_ring[i];
enetc_set_bdr_prio(hw, tx_ring->index, i);
tx_ring->prio = i;
enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
}
/* Reset the number of netdev queues based on the TC count */


@ -95,6 +95,7 @@ struct enetc_bdr {
void __iomem *rcir;
};
u16 index;
u16 prio;
int bd_count; /* # of BDs */
int next_to_use;
int next_to_clean;


@ -137,6 +137,7 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
struct tc_taprio_qopt_offload *taprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
struct enetc_bdr *tx_ring;
int err;
int i;
@ -145,16 +146,20 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
if (priv->tx_ring[i]->tsd_enable)
return -EBUSY;
for (i = 0; i < priv->num_tx_rings; i++)
enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
taprio->enable ? i : 0);
for (i = 0; i < priv->num_tx_rings; i++) {
tx_ring = priv->tx_ring[i];
tx_ring->prio = taprio->enable ? i : 0;
enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
}
err = enetc_setup_taprio(ndev, taprio);
if (err)
for (i = 0; i < priv->num_tx_rings; i++)
enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
taprio->enable ? 0 : i);
if (err) {
for (i = 0; i < priv->num_tx_rings; i++) {
tx_ring = priv->tx_ring[i];
tx_ring->prio = taprio->enable ? 0 : i;
enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
}
}
return err;
}
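
The enetc hunks add a software shadow of each ring's priority (tx_ring->prio) so that any later rewrite of the ring registers restores it instead of silently resetting it to zero. A generic shadow-register sketch under that assumption (the bit layout below is invented):

#include <stdint.h>
#include <stdio.h>

struct tx_ring {
        uint16_t prio;            /* software shadow, survives reconfigs */
};

static uint32_t tbmr;             /* stands in for the ring mode register */

/* Every reprogramming of the ring derives the priority field from the
 * shadow instead of hardcoding zero, so a reconfiguration no longer
 * wipes an mqprio/taprio assignment. */
static void ring_setup(const struct tx_ring *ring)
{
        tbmr = 0x1u | ((uint32_t)ring->prio << 2);  /* enable | prio field */
}

int main(void)
{
        struct tx_ring ring = { .prio = 3 };

        ring_setup(&ring);        /* initial configuration */
        ring_setup(&ring);        /* later reconfig: priority preserved */
        printf("tbmr=0x%x prio=%u\n", tbmr, (tbmr >> 2) & 0x7u);
        return 0;
}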


@ -298,7 +298,6 @@ struct iavf_adapter {
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
#define IAVF_FLAG_INITIAL_MAC_SET BIT(23)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */


@ -1087,12 +1087,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
if (ret)
return ret;
/* If this is an initial set MAC during VF spawn do not wait */
if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) {
adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET;
return 0;
}
ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
iavf_is_mac_set_handled(netdev, addr->sa_data),
msecs_to_jiffies(2500));
@ -2605,8 +2599,6 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET;
adapter->tx_desc_count = IAVF_DEFAULT_TXD;
adapter->rx_desc_count = IAVF_DEFAULT_RXD;
err = iavf_init_interrupt_scheme(adapter);
@ -2921,7 +2913,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
iavf_free_queues(adapter);
memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
iavf_shutdown_adminq(&adapter->hw);
adapter->netdev->flags &= ~IFF_UP;
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
@ -3021,6 +3012,11 @@ static void iavf_reset_task(struct work_struct *work)
iavf_disable_vf(adapter);
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
if (netif_running(netdev)) {
rtnl_lock();
dev_close(netdev);
rtnl_unlock();
}
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@ -3033,6 +3029,7 @@ continue_reset:
if (running) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
iavf_napi_disable_all(adapter);
}
@ -3172,6 +3169,16 @@ reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
if (netif_running(netdev)) {
/* Close device to ensure that Tx queues will not be started
* during netif_device_attach() at the end of the reset task.
*/
rtnl_lock();
dev_close(netdev);
rtnl_unlock();
}
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
reset_finish:
rtnl_lock();
@ -5035,23 +5042,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
static void iavf_remove(struct pci_dev *pdev)
{
struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
struct net_device *netdev = adapter->netdev;
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_adv_rss *rss, *rsstmp;
struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_hw *hw = &adapter->hw;
struct net_device *netdev;
struct iavf_hw *hw;
int err;
/* When reboot/shutdown is in progress no need to do anything
* as the adapter is already REMOVE state that was set during
* iavf_shutdown() callback.
*/
if (adapter->state == __IAVF_REMOVE)
netdev = adapter->netdev;
hw = &adapter->hw;
if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return;
set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
/* Wait until port initialization is complete.
* There are flows where register/unregister netdev may race.
*/


@ -3145,15 +3145,15 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
*/
static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
{
irqreturn_t ret = IRQ_HANDLED;
struct ice_pf *pf = data;
bool irq_handled;
irq_handled = ice_ptp_process_ts(pf);
if (!irq_handled)
ret = IRQ_WAKE_THREAD;
if (ice_is_reset_in_progress(pf->state))
return IRQ_HANDLED;
return ret;
while (!ice_ptp_process_ts(pf))
usleep_range(50, 100);
return IRQ_HANDLED;
}
/**

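With the IRQ_WAKE_THREAD handshake removed, the ice thread function now simply polls until ice_ptp_process_ts() reports that every timestamp slot has been handled. A trimmed-down model of that loop (a stub stands in for the driver call):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int slots_remaining = 3;

/* Stub for ice_ptp_process_ts(): true once every slot is handled. */
static bool process_ts(void)
{
        if (slots_remaining > 0)
                slots_remaining--;
        return slots_remaining == 0;
}

int main(void)
{
        /* Same shape as the new thread function: poll with a short
         * sleep between attempts until nothing is outstanding. */
        while (!process_ts())
                usleep(75);       /* the driver uses usleep_range(50, 100) */
        puts("all timestamps handled");
        return 0;
}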

@ -614,11 +614,14 @@ static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
* 2) extend the 40b timestamp value to get a 64bit timestamp
* 3) send that timestamp to the stack
*
* After looping, if we still have waiting SKBs, return true. This may cause us
* effectively poll even when not strictly necessary. We do this because it's
* possible a new timestamp was requested around the same time as the interrupt.
* In some cases hardware might not interrupt us again when the timestamp is
* captured.
* Returns true if all timestamps were handled, and false if any slots remain
* without a timestamp.
*
* After looping, if we still have waiting SKBs, return false. This may cause
* us effectively poll even when not strictly necessary. We do this because
* it's possible a new timestamp was requested around the same time as the
* interrupt. In some cases hardware might not interrupt us again when the
* timestamp is captured.
*
* Note that we only take the tracking lock when clearing the bit and when
* checking if we need to re-queue this task. The only place where bits can be
@ -641,7 +644,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
u8 idx;
if (!tx->init)
return false;
return true;
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
@ -2381,10 +2384,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
*/
bool ice_ptp_process_ts(struct ice_pf *pf)
{
if (pf->ptp.port.tx.init)
return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
return false;
return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
}
static void ice_ptp_periodic_work(struct kthread_work *work)


@ -7350,6 +7350,7 @@ static int mvpp2_get_sram(struct platform_device *pdev,
struct mvpp2 *priv)
{
struct resource *res;
void __iomem *base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (!res) {
@ -7360,9 +7361,12 @@ static int mvpp2_get_sram(struct platform_device *pdev,
return 0;
}
priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
return PTR_ERR_OR_ZERO(priv->cm3_base);
priv->cm3_base = base;
return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
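
The mvpp2 change checks the devm_ioremap_resource() result in a local variable and only publishes it to priv->cm3_base on success, so the struct never holds an error-encoded pointer. A hedged userspace sketch of the check-then-commit idiom (the ERR_PTR encoding below is a stand-in, not the kernel's headers):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct priv { void *cm3_base; };

/* Toy error-pointer encoding, standing in for the kernel's. */
#define ERR_PTR(e) ((void *)(intptr_t)(e))
#define IS_ERR(p)  ((intptr_t)(p) < 0)
#define PTR_ERR(p) ((int)(intptr_t)(p))

static void *ioremap_stub(int fail)
{
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

/* Check in a local, publish only on success: priv->cm3_base can never
 * end up holding an error-encoded pointer for later code to trip on. */
static int get_sram(struct priv *priv, int fail)
{
        void *base = ioremap_stub(fail);

        if (IS_ERR(base))
                return PTR_ERR(base);
        priv->cm3_base = base;
        return 0;
}

int main(void)
{
        struct priv p = { 0 };

        printf("fail: ret=%d base=%p\n", get_sram(&p, 1), p.cm3_base);
        printf("ok:   ret=%d base=%p\n", get_sram(&p, 0), p.cm3_base);
        return 0;
}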


@ -32,7 +32,6 @@ config OCTEONTX2_PF
tristate "Marvell OcteonTX2 NIC Physical Function driver"
select OCTEONTX2_MBOX
select NET_DEVLINK
depends on MACSEC || !MACSEC
depends on (64BIT && COMPILE_TEST) || ARM64
select DIMLIB
depends on PCI


@ -951,7 +951,7 @@ static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction d
else
event.intr_mask = (dir == MCS_RX) ?
MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
MCS_BBE_RX_PLFIFO_OVERFLOW_INT;
MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
/* Notify the lmac_id info which ran into BBE fatal error */
event.lmac_id = i & 0x3ULL;


@ -880,6 +880,8 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
sprintf(lmac, "LMAC%d", lmac_id);
seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
pci_dev_put(pdev);
}
return 0;
}
@ -2566,6 +2568,7 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
}
}
pci_dev_put(pdev);
return 0;
}


@ -4985,6 +4985,8 @@ static int nix_setup_ipolicers(struct rvu *rvu,
ipolicer->ref_count = devm_kcalloc(rvu->dev,
ipolicer->band_prof.max,
sizeof(u16), GFP_KERNEL);
if (!ipolicer->ref_count)
return -ENOMEM;
}
/* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */


@ -62,15 +62,18 @@ int rvu_sdp_init(struct rvu *rvu)
pfvf->sdp_info = devm_kzalloc(rvu->dev,
sizeof(struct sdp_node_info),
GFP_KERNEL);
if (!pfvf->sdp_info)
if (!pfvf->sdp_info) {
pci_dev_put(pdev);
return -ENOMEM;
}
dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);
put_device(&pdev->dev);
i++;
}
pci_dev_put(pdev);
return 0;
}
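
Both octeontx2-af hunks follow the same refcounting rule: a successful pci_get_device()-style lookup takes a reference that must be dropped on every exit path, error returns included. A small stand-alone sketch of balanced get/put (hypothetical types, not the PCI core):

#include <stdio.h>
#include <stdlib.h>

struct pdev { int refcnt; };

static struct pdev *pdev_get(struct pdev *d) { if (d) d->refcnt++; return d; }
static void pdev_put(struct pdev *d)         { if (d) d->refcnt--; }

/* Mirrors the rvu_sdp_init() fix: the early error return must drop
 * the reference the lookup took, or each failure leaks one count. */
static int init_one(struct pdev *d, int fail_alloc)
{
        void *info = fail_alloc ? NULL : malloc(16);

        if (!info) {
                pdev_put(d);      /* the put the patch adds */
                return -1;
        }
        free(info);
        pdev_put(d);              /* normal path is balanced too */
        return 0;
}

int main(void)
{
        struct pdev d = { 0 };

        init_one(pdev_get(&d), 1);
        printf("refcnt after failed init: %d\n", d.refcnt);  /* 0: balanced */
        return 0;
}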


@ -746,6 +746,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
return 0;
err_sfp_bind:
unregister_netdev(dev);
err_register_netdev:
prestera_port_list_del(port);
err_port_init:


@ -2378,8 +2378,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
data + NET_SKB_PAD + eth->ip_align,
ring->buf_size, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev,
dma_addr)))
dma_addr))) {
skb_free_frag(data);
return -ENOMEM;
}
}
rxd->rxd1 = (unsigned int)dma_addr;
ring->data[i] = data;
@ -2996,8 +2998,10 @@ static int mtk_open(struct net_device *dev)
int i;
err = mtk_start_dma(eth);
if (err)
if (err) {
phylink_disconnect_phy(mac->phylink);
return err;
}
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
mtk_ppe_start(eth->ppe[i]);
@ -4143,13 +4147,13 @@ static int mtk_probe(struct platform_device *pdev)
eth->soc->offload_version, i);
if (!eth->ppe[i]) {
err = -ENOMEM;
goto err_free_dev;
goto err_deinit_ppe;
}
}
err = mtk_eth_offload_init(eth);
if (err)
goto err_free_dev;
goto err_deinit_ppe;
}
for (i = 0; i < MTK_MAX_DEVS; i++) {
@ -4159,7 +4163,7 @@ static int mtk_probe(struct platform_device *pdev)
err = register_netdev(eth->netdev[i]);
if (err) {
dev_err(eth->dev, "error bringing up device\n");
goto err_deinit_mdio;
goto err_deinit_ppe;
} else
netif_info(eth, probe, eth->netdev[i],
"mediatek frame engine at 0x%08lx, irq %d\n",
@ -4177,7 +4181,8 @@ static int mtk_probe(struct platform_device *pdev)
return 0;
err_deinit_mdio:
err_deinit_ppe:
mtk_ppe_deinit(eth);
mtk_mdio_cleanup(eth);
err_free_dev:
mtk_free_dev(eth);


@ -737,7 +737,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
MTK_PPE_ENTRIES * soc->foe_entry_size,
&ppe->foe_phys, GFP_KERNEL);
if (!foe)
return NULL;
goto err_free_l2_flows;
ppe->foe_table = foe;
@ -745,11 +745,26 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
sizeof(*ppe->foe_flow);
ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
if (!ppe->foe_flow)
return NULL;
goto err_free_l2_flows;
mtk_ppe_debugfs_init(ppe, index);
return ppe;
err_free_l2_flows:
rhashtable_destroy(&ppe->l2_flows);
return NULL;
}
void mtk_ppe_deinit(struct mtk_eth *eth)
{
int i;
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
if (!eth->ppe[i])
return;
rhashtable_destroy(&eth->ppe[i]->l2_flows);
}
}
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)


@ -304,6 +304,7 @@ struct mtk_ppe {
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
int version, int index);
void mtk_ppe_deinit(struct mtk_eth *eth);
void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);


@ -697,7 +697,8 @@ static int mlx4_create_zones(struct mlx4_dev *dev,
err = mlx4_bitmap_init(*bitmap + k, 1,
MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
0);
mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
if (!err)
mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
}
if (err)


@ -45,6 +45,8 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"
#define CREATE_TRACE_POINTS
#include "diag/cmd_tracepoint.h"
enum {
CMD_IF_REV = 5,
@ -785,27 +787,14 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);
static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
u16 opcode, op_mod;
u32 syndrome;
u8 status;
u16 uid;
int err;
syndrome = MLX5_GET(mbox_out, out, syndrome);
status = MLX5_GET(mbox_out, out, status);
opcode = MLX5_GET(mbox_in, in, opcode);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
err = cmd_status_to_err(status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
mlx5_cmd_out_err(dev, opcode, op_mod, out);
else
mlx5_core_dbg(dev,
"%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
mlx5_command_str(opcode), opcode, op_mod, uid,
cmd_status_str(status), status, syndrome, err);
}
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
@ -1016,6 +1005,7 @@ static void cmd_work_handler(struct work_struct *work)
cmd_ent_get(ent);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
cmd_ent_get(ent); /* for the _real_ FW event on completion */
/* Skip sending command to fw if internal error */
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
ent->ret = -ENXIO;
@ -1023,7 +1013,6 @@ static void cmd_work_handler(struct work_struct *work)
return;
}
cmd_ent_get(ent); /* for the _real_ FW event on completion */
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
@ -1672,8 +1661,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
cmd_ent_put(ent); /* timeout work was canceled */
if (!forced || /* Real FW completion */
pci_channel_offline(dev->pdev) || /* FW is inaccessible */
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
!opcode_allowed(cmd, ent->op))
cmd_ent_put(ent);
ent->ts2 = ktime_get_ns();
@ -1892,6 +1881,16 @@ out_in:
return err;
}
static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
u8 status = MLX5_GET(mbox_out, out, status);
trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
cmd_status_str(status), status, syndrome,
cmd_status_to_err(status));
}
static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
u32 syndrome, int err)
{
@ -1914,7 +1913,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
}
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out)
{
u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
u8 status = MLX5_GET(mbox_out, out, status);
@ -1922,8 +1921,10 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *
if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
err = -EIO;
if (!err && status != MLX5_CMD_STAT_OK)
if (!err && status != MLX5_CMD_STAT_OK) {
err = -EREMOTEIO;
mlx5_cmd_err_trace(dev, opcode, op_mod, out);
}
cmd_status_log(dev, opcode, status, syndrome, err);
return err;
@ -1951,9 +1952,9 @@ int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int
{
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
u16 opcode = MLX5_GET(mbox_in, in, opcode);
u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
err = cmd_status_err(dev, err, opcode, out);
return err;
return cmd_status_err(dev, err, opcode, op_mod, out);
}
EXPORT_SYMBOL(mlx5_cmd_do);
@ -1997,8 +1998,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
{
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
u16 opcode = MLX5_GET(mbox_in, in, opcode);
u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
err = cmd_status_err(dev, err, opcode, out);
err = cmd_status_err(dev, err, opcode, op_mod, out);
return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);
@ -2034,7 +2036,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
struct mlx5_async_ctx *ctx;
ctx = work->ctx;
status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
@ -2049,6 +2051,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->ctx = ctx;
work->user_callback = callback;
work->opcode = MLX5_GET(mbox_in, in, opcode);
work->op_mod = MLX5_GET(mbox_in, in, op_mod);
work->out = out;
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;


@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mlx5
#if !defined(_MLX5_CMD_TP_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _MLX5_CMD_TP_H_
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
TRACE_EVENT(mlx5_cmd,
TP_PROTO(const char *command_str, u16 opcode, u16 op_mod,
const char *status_str, u8 status, u32 syndrome, int err),
TP_ARGS(command_str, opcode, op_mod, status_str, status, syndrome, err),
TP_STRUCT__entry(__string(command_str, command_str)
__field(u16, opcode)
__field(u16, op_mod)
__string(status_str, status_str)
__field(u8, status)
__field(u32, syndrome)
__field(int, err)
),
TP_fast_assign(__assign_str(command_str, command_str);
__entry->opcode = opcode;
__entry->op_mod = op_mod;
__assign_str(status_str, status_str);
__entry->status = status;
__entry->syndrome = syndrome;
__entry->err = err;
),
TP_printk("%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)",
__get_str(command_str), __entry->opcode, __entry->op_mod,
__get_str(status_str), __entry->status, __entry->syndrome,
__entry->err)
);
#endif /* _MLX5_CMD_TP_H_ */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ./diag
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE cmd_tracepoint
#include <trace/define_trace.h>


@ -638,7 +638,7 @@ static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,
trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
else
trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) |
trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
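
The one-line tracer fix reorders a subtraction and a mask: subtracting after masking lets the borrow run into bit positions that the following OR is about to fill, while subtracting first keeps the borrow inside the full timestamp. A quick demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

#define MASK_52_7 0x1FFFFFFFFFFF80ULL  /* bits 52..7, as in the driver */

int main(void)
{
        uint64_t ts  = 0x80;           /* timestamp with low bits clear */
        uint64_t low = 0x10;           /* bits 6..0 from the string event */

        /* Old order: the borrow from -1 runs into bits 6..0, which the
         * OR below was supposed to own exclusively. */
        uint64_t buggy = ((ts & MASK_52_7) - 1) | low;
        /* New order: subtract on the full value, then mask, so the
         * borrow never reaches the low bit field. */
        uint64_t fixed = ((ts - 1) & MASK_52_7) | low;

        printf("buggy=0x%llx fixed=0x%llx\n",
               (unsigned long long)buggy, (unsigned long long)fixed);
        return 0;
}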


@ -224,15 +224,16 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
continue;
spec = &flow->attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
/* mark the flow's encap dest as non-valid */
esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
/* update from encap rule to slow path rule */
spec = &flow->attr->parse_attr->spec;
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@ -251,6 +252,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
/* we know that the encap is valid */
e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
e->pkt_reformat = NULL;
}
static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
@ -762,8 +764,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
struct net_device **encap_dev,
bool *encap_valid)
struct net_device **encap_dev)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
@ -878,9 +879,8 @@ attach_flow:
if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
*encap_valid = true;
} else {
*encap_valid = false;
flow_flag_set(flow, SLOW);
}
mutex_unlock(&esw->offloads.encap_tbl_lock);


@ -17,8 +17,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
struct net_device **encap_dev,
bool *encap_valid);
struct net_device **encap_dev);
int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,


@ -368,15 +368,15 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
obj_attrs.aso_pdn = macsec->aso.pdn;
obj_attrs.epn_state = sa->epn_state;
if (is_tx) {
obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci);
key = &ctx->sa.tx_sa->key;
} else {
obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
key = &ctx->sa.rx_sa->key;
key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
if (sa->epn_state.epn_enabled) {
obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
}
memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
obj_attrs.replay_window = ctx->secy->replay_window;
obj_attrs.replay_protect = ctx->secy->replay_protect;
@ -1155,7 +1155,7 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
continue;
if (rx_sa->active) {
err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false);
err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
if (err)
goto out;
}
@ -1536,6 +1536,8 @@ static void macsec_async_event(struct work_struct *work)
async_work = container_of(work, struct mlx5e_macsec_async_work, work);
macsec = async_work->macsec;
mutex_lock(&macsec->lock);
mdev = async_work->mdev;
obj_id = async_work->obj_id;
macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
@ -1557,6 +1559,7 @@ static void macsec_async_event(struct work_struct *work)
out_async_work:
kfree(async_work);
mutex_unlock(&macsec->lock);
}
static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)


@ -35,7 +35,6 @@
#include "en.h"
#include "en/port.h"
#include "en/params.h"
#include "en/xsk/pool.h"
#include "en/ptp.h"
#include "lib/clock.h"
#include "en/fs_ethtool.h"
@ -412,15 +411,8 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch)
{
mutex_lock(&priv->state_lock);
ch->max_combined = priv->max_nch;
ch->combined_count = priv->channels.params.num_channels;
if (priv->xsk.refcnt) {
/* The upper half are XSK queues. */
ch->max_combined *= 2;
ch->combined_count *= 2;
}
mutex_unlock(&priv->state_lock);
}
@ -454,16 +446,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
/* Don't allow changing the number of channels if there is an active
* XSK, because the numeration of the XSK and regular RQs will change.
*/
if (priv->xsk.refcnt) {
err = -EINVAL;
netdev_err(priv->netdev, "%s: AF_XDP is active, cannot change the number of channels\n",
__func__);
goto out;
}
/* Don't allow changing the number of channels if HTB offload is active,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.

View file

@ -206,10 +206,11 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
{
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
u32 sz;
WARN_ON_ONCE(entries * umr_entry_size % MLX5_OCTWORD);
sz = ALIGN(entries * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
return entries * umr_entry_size / MLX5_OCTWORD;
return sz / MLX5_OCTWORD;
}
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,


@ -1634,7 +1634,6 @@ set_encap_dests(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack,
bool *encap_valid,
bool *vf_tun)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
@ -1651,7 +1650,6 @@ set_encap_dests(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
esw_attr = attr->esw_attr;
*vf_tun = false;
*encap_valid = true;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
struct net_device *out_dev;
@ -1668,7 +1666,7 @@ set_encap_dests(struct mlx5e_priv *priv,
goto out;
}
err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
extack, &encap_dev, encap_valid);
extack, &encap_dev);
dev_put(out_dev);
if (err)
goto out;
@ -1732,8 +1730,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
bool vf_tun, encap_valid;
u32 max_prio, max_chain;
bool vf_tun;
int err = 0;
parse_attr = attr->parse_attr;
@ -1823,7 +1821,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->int_port = int_port;
}
err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
err = set_encap_dests(priv, flow, attr, extack, &vf_tun);
if (err)
goto err_out;
@ -1853,7 +1851,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
*/
if (!encap_valid || flow_flag_test(flow, SLOW))
if (flow_flag_test(flow, SLOW))
flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
@ -3759,7 +3757,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
struct mlx5e_post_act *post_act = get_post_action(flow->priv);
struct mlx5_flow_attr *attr, *next_attr = NULL;
struct mlx5e_post_act_handle *handle;
bool vf_tun, encap_valid = true;
bool vf_tun;
int err;
/* This is going in reverse order as needed.
@ -3781,13 +3779,10 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
if (list_is_last(&attr->list, &flow->attrs))
break;
err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
if (err)
goto out_free;
if (!encap_valid)
flow_flag_set(flow, SLOW);
err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
if (err)
goto out_free;


@ -433,7 +433,7 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
mlx5_lag_mpesw_is_activated(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
if (pkt_reformat) {
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;


@ -9,7 +9,8 @@ enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
MLX5_FW_RESET_FLAGS_PENDING_COMP,
MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED
};
struct mlx5_fw_reset {
@ -406,7 +407,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
err = mlx5_pci_link_toggle(dev);
if (err) {
mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
goto done;
set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
}
mlx5_enter_error_state(dev, true);
@ -482,6 +483,10 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
goto out;
}
err = fw_reset->ret;
if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) {
mlx5_unload_one_devl_locked(dev);
mlx5_load_one_devl_locked(dev, false);
}
out:
clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
return err;


@ -228,9 +228,8 @@ static void mlx5_ldev_free(struct kref *ref)
if (ldev->nb.notifier_call)
unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
mlx5_lag_mpesw_cleanup(ldev);
cancel_work_sync(&ldev->mpesw_work);
destroy_workqueue(ldev->wq);
mlx5_lag_mpesw_cleanup(ldev);
mutex_destroy(&ldev->lock);
kfree(ldev);
}


@ -50,6 +50,19 @@ struct lag_tracker {
enum netdev_lag_hash hash_type;
};
enum mpesw_op {
MLX5_MPESW_OP_ENABLE,
MLX5_MPESW_OP_DISABLE,
};
struct mlx5_mpesw_work_st {
struct work_struct work;
struct mlx5_lag *lag;
enum mpesw_op op;
struct completion comp;
int result;
};
/* LAG data of a ConnectX card.
* It serves both its phys functions.
*/
@ -66,7 +79,6 @@ struct mlx5_lag {
struct lag_tracker tracker;
struct workqueue_struct *wq;
struct delayed_work bond_work;
struct work_struct mpesw_work;
struct notifier_block nb;
struct lag_mp lag_mp;
struct mlx5_lag_port_sel port_sel;


@ -7,63 +7,95 @@
#include "eswitch.h"
#include "lib/mlx5.h"
void mlx5_mpesw_work(struct work_struct *work)
static int add_mpesw_rule(struct mlx5_lag *ldev)
{
struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work);
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
int err;
mutex_lock(&ldev->lock);
mlx5_disable_lag(ldev);
mutex_unlock(&ldev->lock);
if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
return 0;
if (ldev->mode != MLX5_LAG_MODE_NONE) {
err = -EINVAL;
goto out_err;
}
err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
if (err) {
mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
goto out_err;
}
return 0;
out_err:
atomic_dec(&ldev->lag_mpesw.mpesw_rule_count);
return err;
}
static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev)
static void del_mpesw_rule(struct mlx5_lag *ldev)
{
struct mlx5_lag *ldev = dev->priv.lag;
if (!queue_work(ldev->wq, &ldev->mpesw_work))
mlx5_core_warn(dev, "failed to queue work\n");
}
void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev = dev->priv.lag;
if (!ldev)
return;
mutex_lock(&ldev->lock);
if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
ldev->mode == MLX5_LAG_MODE_MPESW)
mlx5_lag_disable_mpesw(dev);
mutex_unlock(&ldev->lock);
mlx5_disable_lag(ldev);
}
int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
static void mlx5_mpesw_work(struct work_struct *work)
{
struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
struct mlx5_lag *ldev = mpesww->lag;
mutex_lock(&ldev->lock);
if (mpesww->op == MLX5_MPESW_OP_ENABLE)
mpesww->result = add_mpesw_rule(ldev);
else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
del_mpesw_rule(ldev);
mutex_unlock(&ldev->lock);
complete(&mpesww->comp);
}
static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
enum mpesw_op op)
{
struct mlx5_lag *ldev = dev->priv.lag;
struct mlx5_mpesw_work_st *work;
int err = 0;
if (!ldev)
return 0;
mutex_lock(&ldev->lock);
if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
goto out;
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (!work)
return -ENOMEM;
if (ldev->mode != MLX5_LAG_MODE_NONE) {
INIT_WORK(&work->work, mlx5_mpesw_work);
init_completion(&work->comp);
work->op = op;
work->lag = ldev;
if (!queue_work(ldev->wq, &work->work)) {
mlx5_core_warn(dev, "failed to queue mpesw work\n");
err = -EINVAL;
goto out;
}
err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
if (err)
mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
wait_for_completion(&work->comp);
err = work->result;
out:
mutex_unlock(&ldev->lock);
kfree(work);
return err;
}
void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
{
mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
}
int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
{
return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
}
int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
{
struct mlx5_lag *ldev = mdev->priv.lag;
@ -71,12 +103,9 @@ int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
if (!netif_is_bond_master(out_dev) || !ldev)
return 0;
mutex_lock(&ldev->lock);
if (ldev->mode == MLX5_LAG_MODE_MPESW) {
mutex_unlock(&ldev->lock);
if (ldev->mode == MLX5_LAG_MODE_MPESW)
return -EOPNOTSUPP;
}
mutex_unlock(&ldev->lock);
return 0;
}
@ -90,11 +119,10 @@ bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
{
INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work);
atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
}
void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
{
cancel_delayed_work_sync(&ldev->bond_work);
WARN_ON(atomic_read(&ldev->lag_mpesw.mpesw_rule_count));
}
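
The rewritten mpesw path queues each enable/disable as a work item and blocks on a completion, so the caller gets the worker's result without the old window where the queued work could race with LAG teardown. A POSIX-threads sketch of that queue-and-wait shape (names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct mpesw_work_st {              /* shape of the new work item */
        pthread_mutex_t lock;
        pthread_cond_t  done_cv;
        int done;
        int result;
};

static void *worker(void *arg)
{
        struct mpesw_work_st *w = arg;

        pthread_mutex_lock(&w->lock);
        w->result = 0;              /* e.g. the add_mpesw_rule() outcome */
        w->done = 1;
        pthread_cond_signal(&w->done_cv);
        pthread_mutex_unlock(&w->lock);
        return NULL;
}

int main(void)
{
        struct mpesw_work_st w = { PTHREAD_MUTEX_INITIALIZER,
                                   PTHREAD_COND_INITIALIZER, 0, -1 };
        pthread_t t;

        /* Queue the operation, then block until the worker reports
         * back -- the caller never runs the operation itself. */
        pthread_create(&t, NULL, worker, &w);
        pthread_mutex_lock(&w.lock);
        while (!w.done)
                pthread_cond_wait(&w.done_cv, &w.lock);
        pthread_mutex_unlock(&w.lock);
        pthread_join(t, NULL);
        printf("worker result: %d\n", w.result);
        return 0;
}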


@ -12,7 +12,6 @@ struct lag_mpesw {
atomic_t mpesw_rule_count;
};
void mlx5_mpesw_work(struct work_struct *work);
int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev);
bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);
#if IS_ENABLED(CONFIG_MLX5_ESWITCH)


@ -1798,7 +1798,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
res = state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res));
mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, result = %d, %s\n",
__func__, dev->state, dev->pci_status, res, result2str(res));
return res;
}
@ -1837,7 +1838,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
mlx5_pci_trace(dev, "Enter\n");
mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Enter\n",
__func__, dev->state, dev->pci_status);
err = mlx5_pci_enable_device(dev);
if (err) {
@ -1859,7 +1861,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
res = PCI_ERS_RESULT_RECOVERED;
out:
mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res));
mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, err = %d, result = %d, %s\n",
__func__, dev->state, dev->pci_status, err, res, result2str(res));
return res;
}


@ -18,6 +18,10 @@ struct mlx5_sf_dev_table {
phys_addr_t base_address;
u64 sf_bar_length;
struct notifier_block nb;
struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */
struct workqueue_struct *active_wq;
struct work_struct work;
u8 stop_active_wq:1;
struct mlx5_core_dev *dev;
};
@ -168,6 +172,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
return 0;
sf_index = event->function_id - base_id;
mutex_lock(&table->table_lock);
sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) {
case MLX5_VHCA_STATE_INVALID:
@ -191,6 +196,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
default:
break;
}
mutex_unlock(&table->table_lock);
return 0;
}
@ -215,6 +221,78 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
return 0;
}
static void mlx5_sf_dev_add_active_work(struct work_struct *work)
{
struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work);
u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
struct mlx5_core_dev *dev = table->dev;
u16 max_functions;
u16 function_id;
u16 sw_func_id;
int err = 0;
u8 state;
int i;
max_functions = mlx5_sf_max_functions(dev);
function_id = MLX5_CAP_GEN(dev, sf_base_id);
for (i = 0; i < max_functions; i++, function_id++) {
if (table->stop_active_wq)
return;
err = mlx5_cmd_query_vhca_state(dev, function_id, out, sizeof(out));
if (err)
/* A failure of specific vhca doesn't mean others will
* fail as well.
*/
continue;
state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
if (state != MLX5_VHCA_STATE_ACTIVE)
continue;
sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id);
mutex_lock(&table->table_lock);
/* Don't probe a device which is already probed */
if (!xa_load(&table->devices, i))
mlx5_sf_dev_add(dev, i, function_id, sw_func_id);
/* There is a race where SF got inactive after the query
* above. e.g.: the query returns that the state of the
* SF is active, and after that the eswitch manager set it to
* inactive.
* This case cannot be managed in SW, since the probing of the
* SF is on one system, and the inactivation is on a different
* system.
* If the inactive is done after the SF perform init_hca(),
* the SF will fully probe and then removed. If it was
* done before init_hca(), the SF probe will fail.
*/
mutex_unlock(&table->table_lock);
}
}
/* In case SFs are generated externally, probe active SFs */
static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table)
{
if (MLX5_CAP_GEN(table->dev, eswitch_manager))
return 0; /* the table is local */
/* Use a workqueue to probe active SFs, which are in large
* quantity and may take up to minutes to probe.
*/
table->active_wq = create_singlethread_workqueue("mlx5_active_sf");
if (!table->active_wq)
return -ENOMEM;
INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work);
queue_work(table->active_wq, &table->work);
return 0;
}
static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
{
if (table->active_wq) {
table->stop_active_wq = true;
destroy_workqueue(table->active_wq);
}
}
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
@ -240,11 +318,17 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
table->base_address = pci_resource_start(dev->pdev, 2);
table->max_sfs = max_sfs;
xa_init(&table->devices);
mutex_init(&table->table_lock);
dev->priv.sf_dev_table = table;
err = mlx5_vhca_event_notifier_register(dev, &table->nb);
if (err)
goto vhca_err;
err = mlx5_sf_dev_queue_active_work(table);
if (err)
goto add_active_err;
err = mlx5_sf_dev_vhca_arm_all(table);
if (err)
goto arm_err;
@ -252,6 +336,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
return;
arm_err:
mlx5_sf_dev_destroy_active_work(table);
add_active_err:
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
vhca_err:
table->max_sfs = 0;
@ -279,7 +365,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
mlx5_sf_dev_destroy_active_work(table);
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
mutex_destroy(&table->table_lock);
/* Now that event handler is not running, it is safe to destroy
* the sf device without race.


@ -104,7 +104,7 @@ static int sparx5_port_open(struct net_device *ndev)
err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
if (err) {
netdev_err(ndev, "Could not attach to PHY\n");
return err;
goto err_connect;
}
phylink_start(port->phylink);
@ -116,10 +116,20 @@ static int sparx5_port_open(struct net_device *ndev)
err = sparx5_serdes_set(port->sparx5, port, &port->conf);
else
err = phy_power_on(port->serdes);
if (err)
if (err) {
netdev_err(ndev, "%s failed\n", __func__);
goto out_power;
}
}
return 0;
out_power:
phylink_stop(port->phylink);
phylink_disconnect_phy(port->phylink);
err_connect:
sparx5_port_enable(port, false);
return err;
}


@ -90,13 +90,10 @@ static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev,
}
}
sparx5_tc_ets_add(port, params);
break;
return sparx5_tc_ets_add(port, params);
case TC_ETS_DESTROY:
sparx5_tc_ets_del(port);
break;
return sparx5_tc_ets_del(port);
case TC_ETS_GRAFT:
return -EOPNOTSUPP;


@ -341,7 +341,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
return ret;
attrs.split = eth_port.is_split;
attrs.splittable = !attrs.split;
attrs.splittable = eth_port.port_lanes > 1 && !attrs.split;
attrs.lanes = eth_port.port_lanes;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = eth_port.label_port;


@ -1432,6 +1432,9 @@ nfp_port_get_module_info(struct net_device *netdev,
u8 data;
port = nfp_port_from_netdev(netdev);
if (!port)
return -EOPNOTSUPP;
/* update port state to get latest interface */
set_bit(NFP_PORT_CHANGED, &port->flags);
eth_port = nfp_port_get_eth_port(port);


@ -1143,6 +1143,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
buffer_info->dma = 0;
buffer_info->time_stamp = 0;
tx_ring->next_to_use = ring_num;
dev_kfree_skb_any(skb);
return;
}
buffer_info->mapped = true;
@ -2459,6 +2460,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
pch_gbe_phy_hw_reset(&adapter->hw);
pci_dev_put(adapter->ptp_pdev);
free_netdev(netdev);
}
@ -2533,7 +2535,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
/* setup the private structure */
ret = pch_gbe_sw_init(adapter);
if (ret)
goto err_free_netdev;
goto err_put_dev;
/* Initialize PHY */
ret = pch_gbe_init_phy(adapter);
@ -2591,6 +2593,8 @@ static int pch_gbe_probe(struct pci_dev *pdev,
err_free_adapter:
pch_gbe_phy_hw_reset(&adapter->hw);
err_put_dev:
pci_dev_put(adapter->ptp_pdev);
err_free_netdev:
free_netdev(netdev);
return ret;


@ -2471,6 +2471,7 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
skb_shinfo(skb)->nr_frags);
if (tx_cb->seg_count == -1) {
netdev_err(ndev, "%s: invalid segment count!\n", __func__);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}


@ -218,6 +218,7 @@ netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
skb->len, skb->data_len, channel->channel);
if (!efx->n_channels || !efx->n_tx_channels || !channel) {
netif_stop_queue(net_dev);
dev_kfree_skb_any(skb);
goto err;
}


@ -98,6 +98,7 @@ struct ipvl_port {
struct sk_buff_head backlog;
int count;
struct ida ida;
netdevice_tracker dev_tracker;
};
struct ipvl_skb_cb {


@ -83,6 +83,7 @@ static int ipvlan_port_create(struct net_device *dev)
if (err)
goto err;
netdev_hold(dev, &port->dev_tracker, GFP_KERNEL);
return 0;
err:
@ -95,6 +96,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
struct sk_buff *skb;
netdev_put(dev, &port->dev_tracker);
if (port->mode == IPVLAN_MODE_L3S)
ipvlan_l3s_unregister(port);
netdev_rx_handler_unregister(dev);


@ -3835,7 +3835,6 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
int ret;
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (!ops) {


@ -870,8 +870,10 @@ static int at803x_probe(struct phy_device *phydev)
.wolopts = 0,
};
if (ccr < 0)
if (ccr < 0) {
ret = ccr;
goto err;
}
mode_cfg = ccr & AT803X_MODE_CFG_MASK;
switch (mode_cfg) {


@ -1915,6 +1915,7 @@ static const struct driver_info cdc_ncm_zlp_info = {
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
.set_rx_mode = usbnet_cdc_update_filter,
};
/* Same as cdc_ncm_info, but with FLAG_WWAN */


@ -1423,6 +1423,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */


@ -3949,12 +3949,11 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
free_unregister_netdev:
virtio_reset_device(vdev);
unregister_netdev(dev);
free_failover:
net_failover_destroy(vi->failover);
free_vqs:
virtio_reset_device(vdev);
cancel_delayed_work_sync(&vi->refill);
free_receive_page_frags(vi);
virtnet_del_vqs(vi);


@ -249,7 +249,7 @@ static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
if (object->integer.value == 3)
sleep_state = IPC_PCIE_D3L2;
kfree(object);
ACPI_FREE(object);
default_ret:
return sleep_state;


@ -165,6 +165,8 @@ static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
return -EFAULT;
}
kfree(buffer.pointer);
#endif
return 0;
}


@ -112,8 +112,10 @@ static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv,
struct nfcmrvl_i2c_drv_data *drv_data = priv->drv_data;
int ret;
if (test_bit(NFCMRVL_PHY_ERROR, &priv->flags))
if (test_bit(NFCMRVL_PHY_ERROR, &priv->flags)) {
kfree_skb(skb);
return -EREMOTEIO;
}
ret = i2c_master_send(drv_data->i2c, skb->data, skb->len);

@@ -73,11 +73,15 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
struct nxp_nci_info *info = nci_get_drvdata(ndev);
int r;
if (!info->phy_ops->write)
if (!info->phy_ops->write) {
kfree_skb(skb);
return -EOPNOTSUPP;
}
if (info->mode != NXP_NCI_MODE_NCI)
if (info->mode != NXP_NCI_MODE_NCI) {
kfree_skb(skb);
return -EINVAL;
}
r = info->phy_ops->write(info->phy_id, skb);
if (r < 0) {

@@ -105,6 +105,7 @@ static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
mutex_lock(&info->mutex);
if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_NCI) {
kfree_skb(skb);
mutex_unlock(&info->mutex);
return -EINVAL;
}
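
The three NFC send-path fixes above enforce a single ownership rule: an
NCI ->send() callback owns the skb once it is called, so every early
error return must free it or the buffer leaks. A hypothetical driver
shape (the foo_* names are illustrative, not from the tree):

    static int foo_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
    {
            struct foo_info *info = nci_get_drvdata(ndev);

            /* We own the skb from here on: free it on every path
             * that does not pass it onwards. */
            if (!info->ready) {
                    kfree_skb(skb);
                    return -EOPNOTSUPP;
            }

            return foo_phy_write(info, skb);    /* consumes the skb */
    }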

@@ -312,6 +312,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
int r = 0;
struct device *dev = &ndev->nfc_dev->dev;
struct nfc_evt_transaction *transaction;
u32 aid_len;
u8 params_len;
pr_debug("connectivity gate event: %x\n", event);
@@ -325,26 +327,47 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
* Description Tag Length
* AID 81 5 to 16
* PARAMETERS 82 0 to 255
*
* The key differences are aid storage length is variably sized
* in the packet, but fixed in nfc_evt_transaction, and that
* the aid_len is u8 in the packet, but u32 in the structure,
* and the tags in the packet are not included in
* nfc_evt_transaction.
*
* size(b): 1 1 5-16 1 1 0-255
* offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4
* mem name: aid_tag(M) aid_len aid params_tag(M) params_len params
* example: 0x81 5-16 X 0x82 0-255 X
*/
if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
aid_len = skb->data[1];
if (skb->len < aid_len + 4 ||
aid_len > sizeof(transaction->aid))
return -EPROTO;
params_len = skb->data[aid_len + 3];
/* Verify PARAMETERS tag is (82), and final check that there is
* enough space in the packet to read everything.
*/
if (skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG ||
skb->len < aid_len + 4 + params_len)
return -EPROTO;
transaction = devm_kzalloc(dev, sizeof(*transaction) +
params_len, GFP_KERNEL);
if (!transaction)
return -ENOMEM;
transaction->aid_len = skb->data[1];
memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
transaction->aid_len = aid_len;
transaction->params_len = params_len;
/* Check next byte is PARAMETERS tag (82) */
if (skb->data[transaction->aid_len + 2] !=
NFC_EVT_TRANSACTION_PARAMS_TAG)
return -EPROTO;
transaction->params_len = skb->data[transaction->aid_len + 3];
memcpy(transaction->params, skb->data +
transaction->aid_len + 4, transaction->params_len);
memcpy(transaction->aid, &skb->data[2], aid_len);
memcpy(transaction->params, &skb->data[aid_len + 4],
params_len);
r = nfc_se_transaction(ndev->nfc_dev, host, transaction);
break;
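
The checks above amount to a small TLV walk over the event payload. A
userspace model of the same validation, assuming only the 0x81/0x82 tag
values documented in the comment block:

    #include <stddef.h>
    #include <stdint.h>

    #define AID_TAG    0x81
    #define PARAMS_TAG 0x82

    /* Returns 0 if the buffer holds a well-formed transaction event,
     * -1 otherwise. Every length is validated before it is used as
     * an offset, which is exactly what the fix adds. */
    static int validate_evt_transaction(const uint8_t *d, size_t len,
                                        size_t aid_cap)
    {
            size_t aid_len, params_len;

            if (len < 2 || d[0] != AID_TAG)
                    return -1;
            aid_len = d[1];
            if (aid_len > aid_cap || len < aid_len + 4)
                    return -1;      /* need room for params tag + len */
            if (d[aid_len + 2] != PARAMS_TAG)
                    return -1;
            params_len = d[aid_len + 3];
            if (len < aid_len + 4 + params_len)
                    return -1;
            return 0;
    }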

@@ -981,6 +981,7 @@ struct mlx5_async_work {
struct mlx5_async_ctx *ctx;
mlx5_async_cbk_t user_callback;
u16 opcode; /* cmd opcode */
u16 op_mod; /* cmd op_mod */
void *out; /* pointer to the cmd output buffer */
};

@@ -281,7 +281,8 @@ inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, in
* sk_v6_rcv_saddr (ipv6) changes after it has been binded. The socket's
* rcv_saddr field should already have been updated when this is called.
*/
int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk);
int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family);
void inet_bhash2_reset_saddr(struct sock *sk);
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
struct inet_bind2_bucket *tb2, unsigned short port);

@@ -83,7 +83,7 @@ struct neigh_parms {
struct rcu_head rcu_head;
int reachable_time;
int qlen;
u32 qlen;
int data[NEIGH_VAR_DATA_MAX];
DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};

@@ -296,7 +296,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
key->ct_zone = ct->zone.id;
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
key->ct_mark = ct->mark;
key->ct_mark = READ_ONCE(ct->mark);
#endif
cl = nf_ct_labels_find(ct);
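
This is the first of many hunks in this pull that convert ct->mark
accesses to READ_ONCE()/WRITE_ONCE(). The mark is read and updated
locklessly from several contexts, so plain accesses are data races the
compiler may tear or reload; the annotations guarantee single, untorn
loads and stores. The recurring shape, sketched:

    u32 mark = READ_ONCE(ct->mark);         /* one untorn load */
    u32 newmark = (mark & ~mask) ^ value;

    if (READ_ONCE(ct->mark) != newmark)
            WRITE_ONCE(ct->mark, newmark);  /* one untorn store */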

@@ -48,9 +48,11 @@ static const char *lwtunnel_encap_str(enum lwtunnel_encap_types encap_type)
return "RPL";
case LWTUNNEL_ENCAP_IOAM6:
return "IOAM6";
case LWTUNNEL_ENCAP_XFRM:
/* module autoload not supported for encap type */
return NULL;
case LWTUNNEL_ENCAP_IP6:
case LWTUNNEL_ENCAP_IP:
case LWTUNNEL_ENCAP_XFRM:
case LWTUNNEL_ENCAP_NONE:
case __LWTUNNEL_ENCAP_MAX:
/* should not have got here */

@@ -307,7 +307,31 @@ static int neigh_del_timer(struct neighbour *n)
return 0;
}
static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
int family)
{
switch (family) {
case AF_INET:
return __in_dev_arp_parms_get_rcu(dev);
case AF_INET6:
return __in6_dev_nd_parms_get_rcu(dev);
}
return NULL;
}
static void neigh_parms_qlen_dec(struct net_device *dev, int family)
{
struct neigh_parms *p;
rcu_read_lock();
p = neigh_get_dev_parms_rcu(dev, family);
if (p)
p->qlen--;
rcu_read_unlock();
}
static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
int family)
{
struct sk_buff_head tmp;
unsigned long flags;
@@ -321,13 +345,7 @@ static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
struct net_device *dev = skb->dev;
if (net == NULL || net_eq(dev_net(dev), net)) {
struct in_device *in_dev;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev)
in_dev->arp_parms->qlen--;
rcu_read_unlock();
neigh_parms_qlen_dec(dev, family);
__skb_unlink(skb, list);
__skb_queue_tail(&tmp, skb);
}
@@ -409,7 +427,8 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev, skip_perm);
pneigh_ifdown_and_unlock(tbl, dev);
pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL);
pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
tbl->family);
if (skb_queue_empty_lockless(&tbl->proxy_queue))
del_timer_sync(&tbl->proxy_timer);
return 0;
@@ -1621,13 +1640,8 @@ static void neigh_proxy_process(struct timer_list *t)
if (tdif <= 0) {
struct net_device *dev = skb->dev;
struct in_device *in_dev;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev)
in_dev->arp_parms->qlen--;
rcu_read_unlock();
neigh_parms_qlen_dec(dev, tbl->family);
__skb_unlink(skb, &tbl->proxy_queue);
if (tbl->proxy_redo && netif_running(dev)) {
@@ -1821,7 +1835,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
cancel_delayed_work_sync(&tbl->managed_work);
cancel_delayed_work_sync(&tbl->gc_work);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue, NULL);
pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
neigh_ifdown(tbl, NULL);
if (atomic_read(&tbl->entries))
pr_crit("neighbour leakage\n");
@@ -3539,18 +3553,6 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
return ret;
}
static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
int family)
{
switch (family) {
case AF_INET:
return __in_dev_arp_parms_get_rcu(dev);
case AF_INET6:
return __in6_dev_nd_parms_get_rcu(dev);
}
return NULL;
}
static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
int index)
{

@@ -45,11 +45,10 @@ static unsigned int dccp_v4_pernet_id __read_mostly;
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
__be32 daddr, nexthop, prev_sk_rcv_saddr;
struct inet_sock *inet = inet_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
__be16 orig_sport, orig_dport;
__be32 daddr, nexthop;
struct flowi4 *fl4;
struct rtable *rt;
int err;
@@ -91,26 +90,13 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
daddr = fl4->daddr;
if (inet->inet_saddr == 0) {
if (inet_csk(sk)->icsk_bind2_hash) {
prev_addr_hashbucket =
inet_bhashfn_portaddr(&dccp_hashinfo, sk,
sock_net(sk),
inet->inet_num);
prev_sk_rcv_saddr = sk->sk_rcv_saddr;
}
inet->inet_saddr = fl4->saddr;
}
sk_rcv_saddr_set(sk, inet->inet_saddr);
if (prev_addr_hashbucket) {
err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
err = inet_bhash2_update_saddr(sk, &fl4->saddr, AF_INET);
if (err) {
inet->inet_saddr = 0;
sk_rcv_saddr_set(sk, prev_sk_rcv_saddr);
ip_rt_put(rt);
return err;
}
} else {
sk_rcv_saddr_set(sk, inet->inet_saddr);
}
inet->inet_dport = usin->sin_port;
@@ -157,6 +143,7 @@ failure:
* This unhashes the socket and releases the local port, if necessary.
*/
dccp_set_state(sk, DCCP_CLOSED);
inet_bhash2_reset_saddr(sk);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0;

@@ -934,26 +934,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
}
if (saddr == NULL) {
struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
struct in6_addr prev_v6_rcv_saddr;
if (icsk->icsk_bind2_hash) {
prev_addr_hashbucket = inet_bhashfn_portaddr(&dccp_hashinfo,
sk, sock_net(sk),
inet->inet_num);
prev_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
}
saddr = &fl6.saddr;
sk->sk_v6_rcv_saddr = *saddr;
if (prev_addr_hashbucket) {
err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
if (err) {
sk->sk_v6_rcv_saddr = prev_v6_rcv_saddr;
goto failure;
}
}
err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
if (err)
goto failure;
}
/* set the source address */
@@ -985,6 +970,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
late_failure:
dccp_set_state(sk, DCCP_CLOSED);
inet_bhash2_reset_saddr(sk);
__sk_dst_reset(sk);
failure:
inet->inet_dport = 0;

@@ -279,8 +279,7 @@ int dccp_disconnect(struct sock *sk, int flags)
inet->inet_dport = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
inet_bhash2_reset_saddr(sk);
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);

@@ -1230,7 +1230,6 @@ EXPORT_SYMBOL(inet_unregister_protosw);
static int inet_sk_reselect_saddr(struct sock *sk)
{
struct inet_bind_hashbucket *prev_addr_hashbucket;
struct inet_sock *inet = inet_sk(sk);
__be32 old_saddr = inet->inet_saddr;
__be32 daddr = inet->inet_daddr;
@@ -1260,16 +1259,8 @@ static int inet_sk_reselect_saddr(struct sock *sk)
return 0;
}
prev_addr_hashbucket =
inet_bhashfn_portaddr(tcp_or_dccp_get_hashinfo(sk), sk,
sock_net(sk), inet->inet_num);
inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;
err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET);
if (err) {
inet->inet_saddr = old_saddr;
inet->inet_rcv_saddr = old_saddr;
ip_rt_put(rt);
return err;
}

@@ -314,6 +314,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
xo->seq.low += skb_shinfo(skb)->gso_segs;
}
if (xo->seq.low < seq)
xo->seq.hi++;
esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
ip_hdr(skb)->tot_len = htons(skb->len);
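
This hunk (and its esp6 twin further down) adds the carry that was
missing when the low 32 bits of the 64-bit ESN wrap while advancing by
gso_segs. A standalone model of the carry:

    #include <stdint.h>

    /* Advance the low half and propagate an unsigned wrap into the
     * high half before rebuilding the 64-bit sequence number. */
    static uint64_t esn_advance(uint32_t *hi, uint32_t *lo, uint32_t segs)
    {
            uint32_t before = *lo;

            *lo += segs;
            if (*lo < before)               /* wrapped past 2^32 */
                    (*hi)++;
            return ((uint64_t)*hi << 32) + *lo;
    }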

@@ -126,7 +126,7 @@ struct key_vector {
/* This list pointer if valid if (pos | bits) == 0 (LEAF) */
struct hlist_head leaf;
/* This array is valid if (pos | bits) > 0 (TNODE) */
struct key_vector __rcu *tnode[0];
DECLARE_FLEX_ARRAY(struct key_vector __rcu *, tnode);
};
};
@@ -1381,8 +1381,10 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
/* The alias was already inserted, so the node must exist. */
l = l ? l : fib_find_node(t, &tp, key);
if (WARN_ON_ONCE(!l))
if (WARN_ON_ONCE(!l)) {
err = -ENOENT;
goto out_free_new_fa;
}
if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) ==
new_fa) {
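
On the first fib_trie hunk above: a C99 flexible array member cannot
sit directly in a union (nor be the sole member of a struct), which is
why the old code used the GNU tnode[0] zero-length-array extension.
DECLARE_FLEX_ARRAY() keeps the same layout but makes it legal C by
nesting the array, expanding to roughly:

    struct {
            struct { } __empty_tnode;           /* zero-sized */
            struct key_vector __rcu *tnode[];   /* the real flex array */
    };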

@@ -858,34 +858,80 @@ inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, in
return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}
int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk)
static void inet_update_saddr(struct sock *sk, void *saddr, int family)
{
if (family == AF_INET) {
inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
}
#if IS_ENABLED(CONFIG_IPV6)
else {
sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
}
#endif
}
static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
{
struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
struct inet_bind_hashbucket *head, *head2;
struct inet_bind2_bucket *tb2, *new_tb2;
int l3mdev = inet_sk_bound_l3mdev(sk);
struct inet_bind_hashbucket *head2;
int port = inet_sk(sk)->inet_num;
struct net *net = sock_net(sk);
int bhash;
if (!inet_csk(sk)->icsk_bind2_hash) {
/* Not bind()ed before. */
if (reset)
inet_reset_saddr(sk);
else
inet_update_saddr(sk, saddr, family);
return 0;
}
/* Allocate a bind2 bucket ahead of time to avoid permanently putting
* the bhash2 table in an inconsistent state if a new tb2 bucket
* allocation fails.
*/
new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
if (!new_tb2)
if (!new_tb2) {
if (reset) {
/* The (INADDR_ANY, port) bucket might have already
* been freed, then we cannot fixup icsk_bind2_hash,
* so we give up and unlink sk from bhash/bhash2 not
* to leave inconsistency in bhash2.
*/
inet_put_port(sk);
inet_reset_saddr(sk);
}
return -ENOMEM;
}
bhash = inet_bhashfn(net, port, hinfo->bhash_size);
head = &hinfo->bhash[bhash];
head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
/* If we change saddr locklessly, another thread
* iterating over bhash might see corrupted address.
*/
spin_lock_bh(&head->lock);
spin_lock(&head2->lock);
__sk_del_bind2_node(sk);
inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
spin_unlock(&head2->lock);
if (reset)
inet_reset_saddr(sk);
else
inet_update_saddr(sk, saddr, family);
head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
if (prev_saddr) {
spin_lock_bh(&prev_saddr->lock);
__sk_del_bind2_node(sk);
inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
inet_csk(sk)->icsk_bind2_hash);
spin_unlock_bh(&prev_saddr->lock);
}
spin_lock_bh(&head2->lock);
spin_lock(&head2->lock);
tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
if (!tb2) {
tb2 = new_tb2;
@@ -893,15 +939,29 @@ int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct soc
}
sk_add_bind2_node(sk, &tb2->owners);
inet_csk(sk)->icsk_bind2_hash = tb2;
spin_unlock_bh(&head2->lock);
spin_unlock(&head2->lock);
spin_unlock_bh(&head->lock);
if (tb2 != new_tb2)
kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);
return 0;
}
int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
{
return __inet_bhash2_update_saddr(sk, saddr, family, false);
}
EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);
void inet_bhash2_reset_saddr(struct sock *sk)
{
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
__inet_bhash2_update_saddr(sk, NULL, 0, true);
}
EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr);
/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
* Note that we use 32bit integers (vs RFC 'short integers')
* because 2^16 is not a multiple of num_ephemeral and this
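
One detail of the rework above is worth spelling out: the replacement
bind2 bucket is allocated with GFP_ATOMIC before any hash lock is
taken, so an allocation failure can return early while bhash2 is still
consistent. A hypothetical skeleton of the pattern (find_or_install()
is an invented stand-in for the find/insert steps):

    new = kmem_cache_alloc(cachep, GFP_ATOMIC); /* before locking */
    if (!new)
            return -ENOMEM;                     /* state untouched */

    spin_lock_bh(&head->lock);
    /* unlink the old bucket, update the address, then reuse an
     * existing bucket if one matches, else install 'new' */
    tb2 = find_or_install(head2, new);
    spin_unlock_bh(&head->lock);

    if (tb2 != new)                             /* went unused */
            kmem_cache_free(cachep, new);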

@@ -366,6 +366,11 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
iph->tos, dev);
if (unlikely(err))
goto drop_error;
} else {
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (in_dev && IN_DEV_ORCONF(in_dev, NOPOLICY))
IPCB(skb)->flags |= IPSKB_NOPOLICY;
}
#ifdef CONFIG_IP_ROUTE_CLASSID

@@ -435,7 +435,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
switch (ctinfo) {
case IP_CT_NEW:
ct->mark = hash;
WRITE_ONCE(ct->mark, hash);
break;
case IP_CT_RELATED:
case IP_CT_RELATED_REPLY:
@ -452,7 +452,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
#ifdef DEBUG
nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
#endif
pr_debug("hash=%u ct_hash=%u ", hash, ct->mark);
pr_debug("hash=%u ct_hash=%u ", hash, READ_ONCE(ct->mark));
if (!clusterip_responsible(cipinfo->config, hash)) {
pr_debug("not responsible\n");
return NF_DROP;

@@ -3114,8 +3114,7 @@ int tcp_disconnect(struct sock *sk, int flags)
inet->inet_dport = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
inet_bhash2_reset_saddr(sk);
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);

@@ -199,15 +199,14 @@ static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct inet_timewait_death_row *tcp_death_row;
__be32 daddr, nexthop, prev_sk_rcv_saddr;
struct inet_sock *inet = inet_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct ip_options_rcu *inet_opt;
struct net *net = sock_net(sk);
__be16 orig_sport, orig_dport;
__be32 daddr, nexthop;
struct flowi4 *fl4;
struct rtable *rt;
int err;
@@ -251,24 +250,13 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
if (!inet->inet_saddr) {
if (inet_csk(sk)->icsk_bind2_hash) {
prev_addr_hashbucket = inet_bhashfn_portaddr(tcp_death_row->hashinfo,
sk, net, inet->inet_num);
prev_sk_rcv_saddr = sk->sk_rcv_saddr;
}
inet->inet_saddr = fl4->saddr;
}
sk_rcv_saddr_set(sk, inet->inet_saddr);
if (prev_addr_hashbucket) {
err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
err = inet_bhash2_update_saddr(sk, &fl4->saddr, AF_INET);
if (err) {
inet->inet_saddr = 0;
sk_rcv_saddr_set(sk, prev_sk_rcv_saddr);
ip_rt_put(rt);
return err;
}
} else {
sk_rcv_saddr_set(sk, inet->inet_saddr);
}
if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
@@ -343,6 +331,7 @@ failure:
* if necessary.
*/
tcp_set_state(sk, TCP_CLOSE);
inet_bhash2_reset_saddr(sk);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0;

@@ -346,6 +346,9 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
xo->seq.low += skb_shinfo(skb)->gso_segs;
}
if (xo->seq.low < seq)
xo->seq.hi++;
esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
len = skb->len - sizeof(struct ipv6hdr);

@@ -292,24 +292,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
if (!saddr) {
struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
struct in6_addr prev_v6_rcv_saddr;
if (icsk->icsk_bind2_hash) {
prev_addr_hashbucket = inet_bhashfn_portaddr(tcp_death_row->hashinfo,
sk, net, inet->inet_num);
prev_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
}
saddr = &fl6.saddr;
sk->sk_v6_rcv_saddr = *saddr;
if (prev_addr_hashbucket) {
err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
if (err) {
sk->sk_v6_rcv_saddr = prev_v6_rcv_saddr;
goto failure;
}
}
err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
if (err)
goto failure;
}
/* set the source address */
@@ -359,6 +346,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
late_failure:
tcp_set_state(sk, TCP_CLOSE);
inet_bhash2_reset_saddr(sk);
failure:
inet->inet_dport = 0;
sk->sk_route_caps = 0;

@@ -287,9 +287,13 @@ int __init xfrm6_init(void)
if (ret)
goto out_state;
register_pernet_subsys(&xfrm6_net_ops);
ret = register_pernet_subsys(&xfrm6_net_ops);
if (ret)
goto out_protocol;
out:
return ret;
out_protocol:
xfrm6_protocol_fini();
out_state:
xfrm6_state_fini();
out_policy:

@@ -2905,7 +2905,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
break;
if (!aalg->pfkey_supported)
continue;
if (aalg_tmpl_set(t, aalg) && aalg->available)
if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
return sz + sizeof(struct sadb_prop);
@@ -2923,7 +2923,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
if (!ealg->pfkey_supported)
continue;
if (!(ealg_tmpl_set(t, ealg) && ealg->available))
if (!(ealg_tmpl_set(t, ealg)))
continue;
for (k = 1; ; k++) {
@@ -2934,16 +2934,17 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
if (!aalg->pfkey_supported)
continue;
if (aalg_tmpl_set(t, aalg) && aalg->available)
if (aalg_tmpl_set(t, aalg))
sz += sizeof(struct sadb_comb);
}
}
return sz + sizeof(struct sadb_prop);
}
static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
static int dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
int sz = 0;
int i;
p = skb_put(skb, sizeof(struct sadb_prop));
@@ -2971,13 +2972,17 @@ static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
c->sadb_comb_soft_addtime = 20*60*60;
c->sadb_comb_hard_usetime = 8*60*60;
c->sadb_comb_soft_usetime = 7*60*60;
sz += sizeof(*c);
}
}
return sz + sizeof(*p);
}
static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
static int dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
struct sadb_prop *p;
int sz = 0;
int i, k;
p = skb_put(skb, sizeof(struct sadb_prop));
@@ -3019,8 +3024,11 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
c->sadb_comb_soft_addtime = 20*60*60;
c->sadb_comb_hard_usetime = 8*60*60;
c->sadb_comb_soft_usetime = 7*60*60;
sz += sizeof(*c);
}
}
return sz + sizeof(*p);
}
static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c)
@@ -3150,6 +3158,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
struct sadb_x_sec_ctx *sec_ctx;
struct xfrm_sec_ctx *xfrm_ctx;
int ctx_size = 0;
int alg_size = 0;
sockaddr_size = pfkey_sockaddr_size(x->props.family);
if (!sockaddr_size)
@@ -3161,16 +3170,16 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
sizeof(struct sadb_x_policy);
if (x->id.proto == IPPROTO_AH)
size += count_ah_combs(t);
alg_size = count_ah_combs(t);
else if (x->id.proto == IPPROTO_ESP)
size += count_esp_combs(t);
alg_size = count_esp_combs(t);
if ((xfrm_ctx = x->security)) {
ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len);
size += sizeof(struct sadb_x_sec_ctx) + ctx_size;
}
skb = alloc_skb(size + 16, GFP_ATOMIC);
skb = alloc_skb(size + alg_size + 16, GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
@@ -3224,10 +3233,13 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
pol->sadb_x_policy_priority = xp->priority;
/* Set sadb_comb's. */
alg_size = 0;
if (x->id.proto == IPPROTO_AH)
dump_ah_combs(skb, t);
alg_size = dump_ah_combs(skb, t);
else if (x->id.proto == IPPROTO_ESP)
dump_esp_combs(skb, t);
alg_size = dump_esp_combs(skb, t);
hdr->sadb_msg_len += alg_size / 8;
/* security context */
if (xfrm_ctx) {
@@ -3382,7 +3394,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
hdr->sadb_msg_len = size / sizeof(uint64_t);
hdr->sadb_msg_errno = 0;
hdr->sadb_msg_reserved = 0;
hdr->sadb_msg_seq = x->km.seq = get_acqseq();
hdr->sadb_msg_seq = x->km.seq;
hdr->sadb_msg_pid = 0;
/* SA */
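
The af_key rework closes a race between sizing and dumping: the
algorithms' ->available bits can flip while pfkey_send_acquire() runs
(xfrm_probe_algs() is called from pfkey_register()), so a count that
consults them may disagree with the later dump and overrun the skb.
The fix makes the count an availability-independent upper bound and
derives the final message length from what the dump actually wrote; in
outline:

    alg_size = count_esp_combs(t);     /* upper bound for the alloc */
    skb = alloc_skb(size + alg_size + 16, GFP_ATOMIC);
    ...
    alg_size = dump_esp_combs(skb, t); /* bytes actually emitted */
    hdr->sadb_msg_len += alg_size / 8; /* sadb lengths count 64-bit words */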

@@ -1474,11 +1474,12 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
}
sk = sock->sk;
write_lock(&sk->sk_callback_lock);
write_lock_bh(&sk->sk_callback_lock);
ret = l2tp_validate_socket(sk, net, tunnel->encap);
if (ret < 0)
goto err_sock;
goto err_inval_sock;
rcu_assign_sk_user_data(sk, tunnel);
write_unlock_bh(&sk->sk_callback_lock);
tunnel->l2tp_net = net;
pn = l2tp_pernet(net);
@@ -1507,8 +1508,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
};
setup_udp_tunnel_sock(net, sock, &udp_cfg);
} else {
rcu_assign_sk_user_data(sk, tunnel);
}
tunnel->old_sk_destruct = sk->sk_destruct;
@@ -1522,16 +1521,18 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
if (tunnel->fd >= 0)
sockfd_put(sock);
write_unlock(&sk->sk_callback_lock);
return 0;
err_sock:
write_lock_bh(&sk->sk_callback_lock);
rcu_assign_sk_user_data(sk, NULL);
err_inval_sock:
write_unlock_bh(&sk->sk_callback_lock);
if (tunnel->fd < 0)
sock_release(sock);
else
sockfd_put(sock);
write_unlock(&sk->sk_callback_lock);
err:
return ret;
}
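
Two things happen in this l2tp fix: sk_callback_lock is also taken from
softirq context, so the writer side must use write_lock_bh() to avoid a
same-CPU deadlock against a BH reader, and the critical section is
narrowed so nothing that can sleep runs with bottom halves disabled.
The rule of thumb, sketched:

    /* A lock that softirq handlers take must have writers disable
     * BHs, and must not be held across anything that can sleep. */
    write_lock_bh(&sk->sk_callback_lock);
    rcu_assign_sk_user_data(sk, tunnel);         /* quick, atomic */
    write_unlock_bh(&sk->sk_callback_lock);

    setup_udp_tunnel_sock(net, sock, &udp_cfg);  /* may sleep: outside */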

@@ -916,7 +916,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
#ifdef IP_SET_HASH_WITH_MULTI
if (h->bucketsize >= AHASH_MAX_TUNED)
goto set_full;
else if (h->bucketsize < multi)
else if (h->bucketsize <= multi)
h->bucketsize += AHASH_INIT_SIZE;
#endif
if (n->size >= AHASH_MAX(h)) {

@@ -151,18 +151,16 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
return -ERANGE;
if (retried) {
if (retried)
ip = ntohl(h->next.ip);
e.ip = htonl(ip);
}
for (; ip <= ip_to;) {
e.ip = htonl(ip);
ret = adtfn(set, &e, &ext, &ext, flags);
if (ret && !ip_set_eexist(ret, flags))
return ret;
ip += hosts;
e.ip = htonl(ip);
if (e.ip == 0)
if (ip == 0)
return 0;
ret = 0;
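
Two visible changes in this loop: e.ip is now derived from ip at the
top of every iteration, so the value added always matches the loop
variable, and the wraparound test moved to the host-order ip (e.ip is
no longer updated at the bottom of the loop), terminating the walk
cleanly when ip + hosts overflows past 255.255.255.255. A standalone
model of the fixed traversal:

    #include <stdint.h>

    static void walk_range(uint32_t ip, uint32_t ip_to, uint32_t hosts)
    {
            while (ip <= ip_to) {
                    /* adtfn(htonl(ip)) would go here */
                    ip += hosts;
                    if (ip == 0)    /* wrapped past 255.255.255.255 */
                            break;
            }
    }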

@@ -1781,7 +1781,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
}
#ifdef CONFIG_NF_CONNTRACK_MARK
ct->mark = exp->master->mark;
ct->mark = READ_ONCE(exp->master->mark);
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
ct->secmark = exp->master->secmark;

@@ -328,9 +328,9 @@ nla_put_failure:
}
#ifdef CONFIG_NF_CONNTRACK_MARK
static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
static int ctnetlink_dump_mark(struct sk_buff *skb, u32 mark)
{
if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
goto nla_put_failure;
return 0;
@@ -543,7 +543,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
{
if (ctnetlink_dump_status(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, READ_ONCE(ct->mark)) < 0 ||
ctnetlink_dump_secctx(skb, ct) < 0 ||
ctnetlink_dump_id(skb, ct) < 0 ||
ctnetlink_dump_use(skb, ct) < 0 ||
@@ -722,6 +722,7 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
struct sk_buff *skb;
unsigned int type;
unsigned int flags = 0, group;
u32 mark;
int err;
if (events & (1 << IPCT_DESTROY)) {
@@ -826,8 +827,9 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
}
#ifdef CONFIG_NF_CONNTRACK_MARK
if ((events & (1 << IPCT_MARK) || ct->mark)
&& ctnetlink_dump_mark(skb, ct) < 0)
mark = READ_ONCE(ct->mark);
if ((events & (1 << IPCT_MARK) || mark) &&
ctnetlink_dump_mark(skb, mark) < 0)
goto nla_put_failure;
#endif
nlmsg_end(skb, nlh);
@@ -1154,7 +1156,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
}
#ifdef CONFIG_NF_CONNTRACK_MARK
if ((ct->mark & filter->mark.mask) != filter->mark.val)
if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val)
goto ignore_entry;
#endif
status = (u32)READ_ONCE(ct->status);
@@ -2002,9 +2004,9 @@ static void ctnetlink_change_mark(struct nf_conn *ct,
mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
mark = ntohl(nla_get_be32(cda[CTA_MARK]));
newmark = (ct->mark & mask) ^ mark;
if (newmark != ct->mark)
ct->mark = newmark;
newmark = (READ_ONCE(ct->mark) & mask) ^ mark;
if (newmark != READ_ONCE(ct->mark))
WRITE_ONCE(ct->mark, newmark);
}
#endif
@@ -2669,6 +2671,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
{
const struct nf_conntrack_zone *zone;
struct nlattr *nest_parms;
u32 mark;
zone = nf_ct_zone(ct);
@@ -2730,7 +2733,8 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
goto nla_put_failure;
#ifdef CONFIG_NF_CONNTRACK_MARK
if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
mark = READ_ONCE(ct->mark);
if (mark && ctnetlink_dump_mark(skb, mark) < 0)
goto nla_put_failure;
#endif
if (ctnetlink_dump_labels(skb, ct) < 0)

@@ -366,7 +366,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
goto release;
#if defined(CONFIG_NF_CONNTRACK_MARK)
seq_printf(s, "mark=%u ", ct->mark);
seq_printf(s, "mark=%u ", READ_ONCE(ct->mark));
#endif
ct_show_secctx(s, ct);

@@ -1098,6 +1098,7 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
struct flow_block_cb *block_cb, *next;
int err = 0;
down_write(&flowtable->flow_block_lock);
switch (cmd) {
case FLOW_BLOCK_BIND:
list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
@@ -1112,6 +1113,7 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
WARN_ON_ONCE(1);
err = -EOPNOTSUPP;
}
up_write(&flowtable->flow_block_lock);
return err;
}
@@ -1168,7 +1170,9 @@ static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
extack);
down_write(&flowtable->flow_block_lock);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
up_write(&flowtable->flow_block_lock);
if (err < 0)
return err;

@@ -5958,7 +5958,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
&timeout);
if (err)
return err;
} else if (set->flags & NFT_SET_TIMEOUT) {
} else if (set->flags & NFT_SET_TIMEOUT &&
!(flags & NFT_SET_ELEM_INTERVAL_END)) {
timeout = set->timeout;
}
@@ -6024,7 +6025,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err = -EOPNOTSUPP;
goto err_set_elem_expr;
}
} else if (set->num_exprs > 0) {
} else if (set->num_exprs > 0 &&
!(flags & NFT_SET_ELEM_INTERVAL_END)) {
err = nft_set_elem_expr_clone(ctx, set, expr_array);
if (err < 0)
goto err_set_elem_expr_clone;

@@ -98,7 +98,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
return;
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
*dest = ct->mark;
*dest = READ_ONCE(ct->mark);
return;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
@@ -297,8 +297,8 @@ static void nft_ct_set_eval(const struct nft_expr *expr,
switch (priv->key) {
#ifdef CONFIG_NF_CONNTRACK_MARK
case NFT_CT_MARK:
if (ct->mark != value) {
ct->mark = value;
if (READ_ONCE(ct->mark) != value) {
WRITE_ONCE(ct->mark, value);
nf_conntrack_event_cache(IPCT_MARK, ct);
}
break;

@@ -30,6 +30,7 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
u_int32_t new_targetmark;
struct nf_conn *ct;
u_int32_t newmark;
u_int32_t oldmark;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
@@ -37,14 +38,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
switch (info->mode) {
case XT_CONNMARK_SET:
newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
oldmark = READ_ONCE(ct->mark);
newmark = (oldmark & ~info->ctmask) ^ info->ctmark;
if (info->shift_dir == D_SHIFT_RIGHT)
newmark >>= info->shift_bits;
else
newmark <<= info->shift_bits;
if (ct->mark != newmark) {
ct->mark = newmark;
if (READ_ONCE(ct->mark) != newmark) {
WRITE_ONCE(ct->mark, newmark);
nf_conntrack_event_cache(IPCT_MARK, ct);
}
break;
@@ -55,15 +57,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
else
new_targetmark <<= info->shift_bits;
newmark = (ct->mark & ~info->ctmask) ^
newmark = (READ_ONCE(ct->mark) & ~info->ctmask) ^
new_targetmark;
if (ct->mark != newmark) {
ct->mark = newmark;
if (READ_ONCE(ct->mark) != newmark) {
WRITE_ONCE(ct->mark, newmark);
nf_conntrack_event_cache(IPCT_MARK, ct);
}
break;
case XT_CONNMARK_RESTORE:
new_targetmark = (ct->mark & info->ctmask);
new_targetmark = (READ_ONCE(ct->mark) & info->ctmask);
if (info->shift_dir == D_SHIFT_RIGHT)
new_targetmark >>= info->shift_bits;
else
@@ -126,7 +128,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par)
if (ct == NULL)
return false;
return ((ct->mark & info->mask) == info->mark) ^ info->invert;
return ((READ_ONCE(ct->mark) & info->mask) == info->mark) ^ info->invert;
}
static int connmark_mt_check(const struct xt_mtchk_param *par)

@@ -542,7 +542,7 @@ static int nci_open_device(struct nci_dev *ndev)
skb_queue_purge(&ndev->tx_q);
ndev->ops->close(ndev);
ndev->flags = 0;
ndev->flags &= BIT(NCI_UNREG);
}
done:

@@ -279,8 +279,10 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_plen(skb->data));
conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data));
if (!conn_info)
if (!conn_info) {
kfree_skb(skb);
return;
}
/* strip the nci data header */
skb_pull(skb, NCI_DATA_HDR_SIZE);

@@ -152,7 +152,7 @@ static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
static u32 ovs_ct_get_mark(const struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
return ct ? ct->mark : 0;
return ct ? READ_ONCE(ct->mark) : 0;
#else
return 0;
#endif
@@ -340,9 +340,9 @@ static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
u32 new_mark;
new_mark = ct_mark | (ct->mark & ~(mask));
if (ct->mark != new_mark) {
ct->mark = new_mark;
new_mark = ct_mark | (READ_ONCE(ct->mark) & ~(mask));
if (READ_ONCE(ct->mark) != new_mark) {
WRITE_ONCE(ct->mark, new_mark);
if (nf_ct_is_confirmed(ct))
nf_conntrack_event_cache(IPCT_MARK, ct);
key->ct.mark = new_mark;

@@ -399,6 +399,7 @@ enum rxrpc_conn_proto_state {
struct rxrpc_bundle {
struct rxrpc_conn_parameters params;
refcount_t ref;
atomic_t active; /* Number of active users */
unsigned int debug_id;
bool try_upgrade; /* True if the bundle is attempting upgrade */
bool alloc_conn; /* True if someone's getting a conn */
