mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-03-18 04:54:52 +00:00
Networking fixes for 5.15-rc7, including fixes from netfilter, and can.
Current release - regressions: - revert "vrf: reset skb conntrack connection on VRF rcv", there are valid uses for previous behavior - can: m_can: fix iomap_read_fifo() and iomap_write_fifo() Current release - new code bugs: - mlx5: e-switch, return correct error code on group creation failure Previous releases - regressions: - sctp: fix transport encap_port update in sctp_vtag_verify - stmmac: fix E2E delay mechanism (in PTP timestamping) Previous releases - always broken: - netfilter: ip6t_rt: fix out-of-bounds read of ipv6_rt_hdr - netfilter: xt_IDLETIMER: fix out-of-bound read caused by lack of init - netfilter: ipvs: make global sysctl read-only in non-init netns - tcp: md5: fix selection between vrf and non-vrf keys - ipv6: count rx stats on the orig netdev when forwarding - bridge: mcast: use multicast_membership_interval for IGMPv3 - can: - j1939: fix UAF for rx_kref of j1939_priv abort sessions on receiving bad messages - isotp: fix TX buffer concurrent access in isotp_sendmsg() fix return error on FC timeout on TX path - ice: fix re-init of RDMA Tx queues and crash if RDMA was not inited - hns3: schedule the polling again when allocation fails, prevent stalls - drivers: add missing of_node_put() when aborting for_each_available_child_of_node() - ptp: fix possible memory leak and UAF in ptp_clock_register() - e1000e: fix packet loss in burst mode on Tiger Lake and later - mlx5e: ipsec: fix more checksum offload issues Signed-off-by: Jakub Kicinski <kuba@kernel.org> -----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEE6jPA+I1ugmIBA4hXMUZtbf5SIrsFAmFxgHwACgkQMUZtbf5S IrvFgw//T73aR3B2Xvz5/1rtglfmtcUqFQsyGDXGD5HnfbAbsRcz8vcQ/mTsExl7 +mJY/ZuQefsD7UQDyg3GNhbgf1+pEjHC81ryeNsfET7+JxgYLD3NEYSBYUqIFZUo gStAStGBG+ClQUaqlkGFyyf6GrqwpmxZKRr6F9fUsufQ14m9tvcT/QPcrXL4q7qX Fz644yUe/IvKnuJDHJVZsc8UXR9NTPCyCNJT9kVewwPMIMEc/xMOg5QONLZT0TlC Zk4XJIqlBBEQWrN/QwrGXm82aO+3gQZyD5K9AvpczgcBjOr6FJOmN6zkQrqNNWaC 2wPAfWi7DALPtOZR6lCxoeWfLRfdn1ZOn5x2z5xrtAXCV2FTaMg8in9TzJ57qmcb 
/l43QzcNGSj1ytyny8pqgdsX2MSqs0O5VSG4egMtz7TeU/rs7uAx2IVHbPT8CHop PvhVHeUeu9lGu+FUK8piQbb5aVpbA9qlOj/rXNrHDIxdA9McQgVs+tljNG4X5KtX L7BR84wNg98HtIINVx6RjYz9lOpG1qBuw5RCiqiAaN1RBY7lYAhMaAE6U3azjgC+ AIz/MacNuAz/oTuutQB6/0WZDDJhy4WEy3TrDLlpQNz6yIrpKFN+ftyF6DuVUSMH PmtZ4E/DLooQL5KwuoDdYDH1gSMlggBejeGHTFJ+RUMuvRePZQ8= =Hwqr -----END PGP SIGNATURE----- Merge tag 'net-5.15-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net Pull networking fixes from Jakub Kicinski: "Including fixes from netfilter, and can. We'll have one more fix for a socket accounting regression, it's still getting polished. Otherwise things look fine. Current release - regressions: - revert "vrf: reset skb conntrack connection on VRF rcv", there are valid uses for previous behavior - can: m_can: fix iomap_read_fifo() and iomap_write_fifo() Current release - new code bugs: - mlx5: e-switch, return correct error code on group creation failure Previous releases - regressions: - sctp: fix transport encap_port update in sctp_vtag_verify - stmmac: fix E2E delay mechanism (in PTP timestamping) Previous releases - always broken: - netfilter: ip6t_rt: fix out-of-bounds read of ipv6_rt_hdr - netfilter: xt_IDLETIMER: fix out-of-bound read caused by lack of init - netfilter: ipvs: make global sysctl read-only in non-init netns - tcp: md5: fix selection between vrf and non-vrf keys - ipv6: count rx stats on the orig netdev when forwarding - bridge: mcast: use multicast_membership_interval for IGMPv3 - can: - j1939: fix UAF for rx_kref of j1939_priv abort sessions on receiving bad messages - isotp: fix TX buffer concurrent access in isotp_sendmsg() fix return error on FC timeout on TX path - ice: fix re-init of RDMA Tx queues and crash if RDMA was not inited - hns3: schedule the polling again when allocation fails, prevent stalls - drivers: add missing of_node_put() when aborting for_each_available_child_of_node() - ptp: fix possible memory leak and UAF in ptp_clock_register() - e1000e: fix packet loss 
in burst mode on Tiger Lake and later - mlx5e: ipsec: fix more checksum offload issues" * tag 'net-5.15-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (75 commits) usbnet: sanity check for maxpacket net: enetc: make sure all traffic classes can send large frames net: enetc: fix ethtool counter name for PM0_TERR ptp: free 'vclock_index' in ptp_clock_release() sfc: Don't use netif_info before net_device setup sfc: Export fibre-specific supported link modes net/mlx5e: IPsec: Fix work queue entry ethernet segment checksum flags net/mlx5e: IPsec: Fix a misuse of the software parser's fields net/mlx5e: Fix vlan data lost during suspend flow net/mlx5: E-switch, Return correct error code on group creation failure net/mlx5: Lag, change multipath and bonding to be mutually exclusive ice: Add missing E810 device ids igc: Update I226_K device ID e1000e: Fix packet loss on Tiger Lake and later e1000e: Separate TGP board type from SPT ptp: Fix possible memory leak in ptp_clock_register() net: stmmac: Fix E2E delay mechanism nfc: st95hf: Make spi remove() callback return zero net: hns3: disable sriov before unload hclge layer net: hns3: fix vf reset workqueue cannot exit ...
This commit is contained in:
commit
6c2c712767
92 changed files with 913 additions and 301 deletions
|
@ -30,10 +30,11 @@ The ``ice`` driver reports the following versions
|
|||
PHY, link, etc.
|
||||
* - ``fw.mgmt.api``
|
||||
- running
|
||||
- 1.5
|
||||
- 2-digit version number of the API exported over the AdminQ by the
|
||||
management firmware. Used by the driver to identify what commands
|
||||
are supported.
|
||||
- 1.5.1
|
||||
- 3-digit version number (major.minor.patch) of the API exported over
|
||||
the AdminQ by the management firmware. Used by the driver to
|
||||
identify what commands are supported. Historical versions of the
|
||||
kernel only displayed a 2-digit version number (major.minor).
|
||||
* - ``fw.mgmt.build``
|
||||
- running
|
||||
- 0x305d955f
|
||||
|
|
|
@ -59,11 +59,11 @@ specified with a ``sockaddr`` type, with a single-byte endpoint address:
|
|||
};
|
||||
|
||||
struct sockaddr_mctp {
|
||||
unsigned short int smctp_family;
|
||||
int smctp_network;
|
||||
struct mctp_addr smctp_addr;
|
||||
__u8 smctp_type;
|
||||
__u8 smctp_tag;
|
||||
__kernel_sa_family_t smctp_family;
|
||||
unsigned int smctp_network;
|
||||
struct mctp_addr smctp_addr;
|
||||
__u8 smctp_type;
|
||||
__u8 smctp_tag;
|
||||
};
|
||||
|
||||
#define MCTP_NET_ANY 0x0
|
||||
|
|
|
@ -1994,14 +1994,14 @@ setup_hw(struct hfc_pci *hc)
|
|||
pci_set_master(hc->pdev);
|
||||
if (!hc->irq) {
|
||||
printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
|
||||
return 1;
|
||||
return -EINVAL;
|
||||
}
|
||||
hc->hw.pci_io =
|
||||
(char __iomem *)(unsigned long)hc->pdev->resource[1].start;
|
||||
|
||||
if (!hc->hw.pci_io) {
|
||||
printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
|
||||
return 1;
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* Allocate memory for FIFOS */
|
||||
/* the memory needs to be on a 32k boundary within the first 4G */
|
||||
|
@ -2012,7 +2012,7 @@ setup_hw(struct hfc_pci *hc)
|
|||
if (!buffer) {
|
||||
printk(KERN_WARNING
|
||||
"HFC-PCI: Error allocating memory for FIFO!\n");
|
||||
return 1;
|
||||
return -ENOMEM;
|
||||
}
|
||||
hc->hw.fifos = buffer;
|
||||
pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
|
||||
|
@ -2022,7 +2022,7 @@ setup_hw(struct hfc_pci *hc)
|
|||
"HFC-PCI: Error in ioremap for PCI!\n");
|
||||
dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
|
||||
hc->hw.dmahandle);
|
||||
return 1;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
printk(KERN_INFO
|
||||
|
|
|
@ -32,8 +32,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
|
|||
static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
|
||||
{
|
||||
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
|
||||
void __iomem *src = priv->mram_base + offset;
|
||||
|
||||
ioread32_rep(priv->mram_base + offset, val, val_count);
|
||||
while (val_count--) {
|
||||
*(unsigned int *)val = ioread32(src);
|
||||
val += 4;
|
||||
src += 4;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -51,8 +56,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
|
|||
const void *val, size_t val_count)
|
||||
{
|
||||
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
|
||||
void __iomem *dst = priv->mram_base + offset;
|
||||
|
||||
iowrite32_rep(priv->base + offset, val, val_count);
|
||||
while (val_count--) {
|
||||
iowrite32(*(unsigned int *)val, dst);
|
||||
val += 4;
|
||||
dst += 4;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
|
|||
struct rcar_can_priv *priv = netdev_priv(ndev);
|
||||
u16 ctlr;
|
||||
|
||||
if (netif_running(ndev)) {
|
||||
netif_stop_queue(ndev);
|
||||
netif_device_detach(ndev);
|
||||
}
|
||||
if (!netif_running(ndev))
|
||||
return 0;
|
||||
|
||||
netif_stop_queue(ndev);
|
||||
netif_device_detach(ndev);
|
||||
|
||||
ctlr = readw(&priv->regs->ctlr);
|
||||
ctlr |= RCAR_CAN_CTLR_CANM_HALT;
|
||||
writew(ctlr, &priv->regs->ctlr);
|
||||
|
@ -868,6 +870,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
|
|||
u16 ctlr;
|
||||
int err;
|
||||
|
||||
if (!netif_running(ndev))
|
||||
return 0;
|
||||
|
||||
err = clk_enable(priv->clk);
|
||||
if (err) {
|
||||
netdev_err(ndev, "clk_enable() failed, error %d\n", err);
|
||||
|
@ -881,10 +886,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
|
|||
writew(ctlr, &priv->regs->ctlr);
|
||||
priv->can.state = CAN_STATE_ERROR_ACTIVE;
|
||||
|
||||
if (netif_running(ndev)) {
|
||||
netif_device_attach(ndev);
|
||||
netif_start_queue(ndev);
|
||||
}
|
||||
netif_device_attach(ndev);
|
||||
netif_start_queue(ndev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -752,16 +752,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
|
|||
struct net_device *prev_dev = chan->prev_dev;
|
||||
|
||||
dev_info(&pdev->dev, "removing device %s\n", dev->name);
|
||||
/* do that only for first channel */
|
||||
if (!prev_dev && chan->pciec_card)
|
||||
peak_pciec_remove(chan->pciec_card);
|
||||
unregister_sja1000dev(dev);
|
||||
free_sja1000dev(dev);
|
||||
dev = prev_dev;
|
||||
|
||||
if (!dev) {
|
||||
/* do that only for first channel */
|
||||
if (chan->pciec_card)
|
||||
peak_pciec_remove(chan->pciec_card);
|
||||
if (!dev)
|
||||
break;
|
||||
}
|
||||
priv = netdev_priv(dev);
|
||||
chan = priv->priv;
|
||||
}
|
||||
|
|
|
@ -551,11 +551,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
|
|||
} else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
|
||||
new_state = CAN_STATE_ERROR_WARNING;
|
||||
} else {
|
||||
/* no error bit (so, no error skb, back to active state) */
|
||||
dev->can.state = CAN_STATE_ERROR_ACTIVE;
|
||||
/* back to (or still in) ERROR_ACTIVE state */
|
||||
new_state = CAN_STATE_ERROR_ACTIVE;
|
||||
pdev->bec.txerr = 0;
|
||||
pdev->bec.rxerr = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* state hasn't changed */
|
||||
|
@ -568,8 +567,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
|
|||
|
||||
/* allocate an skb to store the error frame */
|
||||
skb = alloc_can_err_skb(netdev, &cf);
|
||||
if (skb)
|
||||
can_change_state(netdev, cf, tx_state, rx_state);
|
||||
can_change_state(netdev, cf, tx_state, rx_state);
|
||||
|
||||
/* things must be done even in case of OOM */
|
||||
if (new_state == CAN_STATE_BUS_OFF)
|
||||
|
|
|
@ -230,7 +230,7 @@
|
|||
#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
|
||||
#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
|
||||
#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
|
||||
#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
|
||||
#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
|
||||
|
||||
#define GSWIP_TABLE_ACTIVE_VLAN 0x01
|
||||
#define GSWIP_TABLE_VLAN_MAPPING 0x02
|
||||
|
|
|
@ -1035,9 +1035,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
|
|||
{
|
||||
struct mt7530_priv *priv = ds->priv;
|
||||
|
||||
if (!dsa_is_user_port(ds, port))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&priv->reg_mutex);
|
||||
|
||||
/* Allow the user port gets connected to the cpu port and also
|
||||
|
@ -1060,9 +1057,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
|
|||
{
|
||||
struct mt7530_priv *priv = ds->priv;
|
||||
|
||||
if (!dsa_is_user_port(ds, port))
|
||||
return;
|
||||
|
||||
mutex_lock(&priv->reg_mutex);
|
||||
|
||||
/* Clear up all port matrix which could be restored in the next
|
||||
|
@ -3211,7 +3205,7 @@ mt7530_probe(struct mdio_device *mdiodev)
|
|||
return -ENOMEM;
|
||||
|
||||
priv->ds->dev = &mdiodev->dev;
|
||||
priv->ds->num_ports = DSA_MAX_PORTS;
|
||||
priv->ds->num_ports = MT7530_NUM_PORTS;
|
||||
|
||||
/* Use medatek,mcm property to distinguish hardware type that would
|
||||
* casues a little bit differences on power-on sequence.
|
||||
|
|
|
@ -1193,7 +1193,7 @@ static int nic_register_interrupts(struct nicpf *nic)
|
|||
dev_err(&nic->pdev->dev,
|
||||
"Request for #%d msix vectors failed, returned %d\n",
|
||||
nic->num_vec, ret);
|
||||
return 1;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Register mailbox interrupt handler */
|
||||
|
|
|
@ -1224,7 +1224,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
|
|||
if (ret < 0) {
|
||||
netdev_err(nic->netdev,
|
||||
"Req for #%d msix vectors failed\n", nic->num_vec);
|
||||
return 1;
|
||||
return ret;
|
||||
}
|
||||
|
||||
sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
|
||||
|
@ -1243,7 +1243,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
|
|||
if (!nicvf_check_pf_ready(nic)) {
|
||||
nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
|
||||
nicvf_unregister_interrupts(nic);
|
||||
return 1;
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -157,7 +157,7 @@ static const struct {
|
|||
{ ENETC_PM0_TFRM, "MAC tx frames" },
|
||||
{ ENETC_PM0_TFCS, "MAC tx fcs errors" },
|
||||
{ ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
|
||||
{ ENETC_PM0_TERR, "MAC tx frames" },
|
||||
{ ENETC_PM0_TERR, "MAC tx frame errors" },
|
||||
{ ENETC_PM0_TUCA, "MAC tx unicast frames" },
|
||||
{ ENETC_PM0_TMCA, "MAC tx multicast frames" },
|
||||
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
|
||||
|
|
|
@ -517,10 +517,13 @@ static void enetc_port_si_configure(struct enetc_si *si)
|
|||
|
||||
static void enetc_configure_port_mac(struct enetc_hw *hw)
|
||||
{
|
||||
int tc;
|
||||
|
||||
enetc_port_wr(hw, ENETC_PM0_MAXFRM,
|
||||
ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
|
||||
|
||||
enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
|
||||
for (tc = 0; tc < 8; tc++)
|
||||
enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
|
||||
|
||||
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
|
||||
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
|
||||
|
|
|
@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
|
|||
static LIST_HEAD(hnae3_client_list);
|
||||
static LIST_HEAD(hnae3_ae_dev_list);
|
||||
|
||||
void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
|
||||
{
|
||||
const struct pci_device_id *pci_id;
|
||||
struct hnae3_ae_dev *ae_dev;
|
||||
|
||||
if (!ae_algo)
|
||||
return;
|
||||
|
||||
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
|
||||
if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
|
||||
continue;
|
||||
|
||||
pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
|
||||
if (!pci_id)
|
||||
continue;
|
||||
if (IS_ENABLED(CONFIG_PCI_IOV))
|
||||
pci_disable_sriov(ae_dev->pdev);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
|
||||
|
||||
/* we are keeping things simple and using single lock for all the
|
||||
* list. This is a non-critical code so other updations, if happen
|
||||
* in parallel, can wait.
|
||||
|
|
|
@ -853,6 +853,7 @@ struct hnae3_handle {
|
|||
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
|
||||
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
|
||||
|
||||
void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
|
||||
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
|
||||
void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
|
||||
|
||||
|
|
|
@ -1847,7 +1847,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
|
|||
|
||||
static int hns3_skb_linearize(struct hns3_enet_ring *ring,
|
||||
struct sk_buff *skb,
|
||||
u8 max_non_tso_bd_num,
|
||||
unsigned int bd_num)
|
||||
{
|
||||
/* 'bd_num == UINT_MAX' means the skb' fraglist has a
|
||||
|
@ -1864,8 +1863,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
|
|||
* will not help.
|
||||
*/
|
||||
if (skb->len > HNS3_MAX_TSO_SIZE ||
|
||||
(!skb_is_gso(skb) && skb->len >
|
||||
HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
|
||||
(!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
|
||||
u64_stats_update_begin(&ring->syncp);
|
||||
ring->stats.hw_limitation++;
|
||||
u64_stats_update_end(&ring->syncp);
|
||||
|
@ -1900,8 +1898,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
|
||||
bd_num))
|
||||
if (hns3_skb_linearize(ring, skb, bd_num))
|
||||
return -ENOMEM;
|
||||
|
||||
bd_num = hns3_tx_bd_count(skb->len);
|
||||
|
@ -3258,6 +3255,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
|
|||
{
|
||||
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
|
||||
ring->desc[i].addr = 0;
|
||||
ring->desc_cb[i].refill = 0;
|
||||
}
|
||||
|
||||
static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
|
||||
|
@ -3336,6 +3334,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
|
|||
|
||||
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
|
||||
ring->desc_cb[i].page_offset);
|
||||
ring->desc_cb[i].refill = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -3365,6 +3364,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
|
|||
{
|
||||
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
|
||||
ring->desc_cb[i] = *res_cb;
|
||||
ring->desc_cb[i].refill = 1;
|
||||
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
|
||||
ring->desc_cb[i].page_offset);
|
||||
ring->desc[i].rx.bd_base_info = 0;
|
||||
|
@ -3373,6 +3373,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
|
|||
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
|
||||
{
|
||||
ring->desc_cb[i].reuse_flag = 0;
|
||||
ring->desc_cb[i].refill = 1;
|
||||
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
|
||||
ring->desc_cb[i].page_offset);
|
||||
ring->desc[i].rx.bd_base_info = 0;
|
||||
|
@ -3479,10 +3480,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
|
|||
int ntc = ring->next_to_clean;
|
||||
int ntu = ring->next_to_use;
|
||||
|
||||
if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
|
||||
return ring->desc_num;
|
||||
|
||||
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
|
||||
}
|
||||
|
||||
static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
|
||||
/* Return true if there is any allocation failure */
|
||||
static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
|
||||
int cleand_count)
|
||||
{
|
||||
struct hns3_desc_cb *desc_cb;
|
||||
|
@ -3507,7 +3512,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
|
|||
hns3_rl_err(ring_to_netdev(ring),
|
||||
"alloc rx buffer failed: %d\n",
|
||||
ret);
|
||||
break;
|
||||
|
||||
writel(i, ring->tqp->io_base +
|
||||
HNS3_RING_RX_RING_HEAD_REG);
|
||||
return true;
|
||||
}
|
||||
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
|
||||
|
||||
|
@ -3520,6 +3528,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
|
|||
}
|
||||
|
||||
writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
|
||||
|
@ -3824,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
|
|||
{
|
||||
ring->desc[ring->next_to_clean].rx.bd_base_info &=
|
||||
cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
|
||||
ring->desc_cb[ring->next_to_clean].refill = 0;
|
||||
ring->next_to_clean += 1;
|
||||
|
||||
if (unlikely(ring->next_to_clean == ring->desc_num))
|
||||
|
@ -4170,6 +4180,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
|
|||
{
|
||||
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
|
||||
int unused_count = hns3_desc_unused(ring);
|
||||
bool failure = false;
|
||||
int recv_pkts = 0;
|
||||
int err;
|
||||
|
||||
|
@ -4178,9 +4189,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
|
|||
while (recv_pkts < budget) {
|
||||
/* Reuse or realloc buffers */
|
||||
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
|
||||
hns3_nic_alloc_rx_buffers(ring, unused_count);
|
||||
unused_count = hns3_desc_unused(ring) -
|
||||
ring->pending_buf;
|
||||
failure = failure ||
|
||||
hns3_nic_alloc_rx_buffers(ring, unused_count);
|
||||
unused_count = 0;
|
||||
}
|
||||
|
||||
/* Poll one pkt */
|
||||
|
@ -4199,11 +4210,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
|
|||
}
|
||||
|
||||
out:
|
||||
/* Make all data has been write before submit */
|
||||
if (unused_count > 0)
|
||||
hns3_nic_alloc_rx_buffers(ring, unused_count);
|
||||
|
||||
return recv_pkts;
|
||||
return failure ? budget : recv_pkts;
|
||||
}
|
||||
|
||||
static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
|
||||
|
|
|
@ -186,11 +186,9 @@ enum hns3_nic_state {
|
|||
|
||||
#define HNS3_MAX_BD_SIZE 65535
|
||||
#define HNS3_MAX_TSO_BD_NUM 63U
|
||||
#define HNS3_MAX_TSO_SIZE \
|
||||
(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
|
||||
#define HNS3_MAX_TSO_SIZE 1048576U
|
||||
#define HNS3_MAX_NON_TSO_SIZE 9728U
|
||||
|
||||
#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
|
||||
(HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
|
||||
|
||||
#define HNS3_VECTOR_GL0_OFFSET 0x100
|
||||
#define HNS3_VECTOR_GL1_OFFSET 0x200
|
||||
|
@ -332,6 +330,7 @@ struct hns3_desc_cb {
|
|||
u32 length; /* length of the buffer */
|
||||
|
||||
u16 reuse_flag;
|
||||
u16 refill;
|
||||
|
||||
/* desc type, used by the ring user to mark the type of the priv data */
|
||||
u16 type;
|
||||
|
|
|
@ -137,6 +137,15 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
|
|||
*changed = true;
|
||||
break;
|
||||
case IEEE_8021QAZ_TSA_ETS:
|
||||
/* The hardware will switch to sp mode if bandwidth is
|
||||
* 0, so limit ets bandwidth must be greater than 0.
|
||||
*/
|
||||
if (!ets->tc_tx_bw[i]) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"tc%u ets bw cannot be 0\n", i);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
|
||||
HCLGE_SCH_MODE_DWRR)
|
||||
*changed = true;
|
||||
|
|
|
@ -1560,8 +1560,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
|
|||
|
||||
/* configure TM QCN hw errors */
|
||||
hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
|
||||
if (en)
|
||||
desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
|
||||
if (en) {
|
||||
desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
|
||||
desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
|
||||
}
|
||||
|
||||
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
if (ret)
|
||||
|
|
|
@ -50,6 +50,8 @@
|
|||
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
|
||||
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
|
||||
#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
|
||||
#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29
|
||||
#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00
|
||||
#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
|
||||
#define HCLGE_NCSI_ERR_INT_EN 0x3
|
||||
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
|
||||
|
|
|
@ -13065,6 +13065,7 @@ static int hclge_init(void)
|
|||
|
||||
static void hclge_exit(void)
|
||||
{
|
||||
hnae3_unregister_ae_algo_prepare(&ae_algo);
|
||||
hnae3_unregister_ae_algo(&ae_algo);
|
||||
destroy_workqueue(hclge_wq);
|
||||
}
|
||||
|
|
|
@ -752,6 +752,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
|
|||
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
|
||||
for (k = 0; k < hdev->tm_info.num_tc; k++)
|
||||
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
|
||||
for (; k < HNAE3_MAX_TC; k++)
|
||||
hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -2273,9 +2273,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
|
|||
hdev->reset_attempts = 0;
|
||||
|
||||
hdev->last_reset_time = jiffies;
|
||||
while ((hdev->reset_type =
|
||||
hclgevf_get_reset_level(hdev, &hdev->reset_pending))
|
||||
!= HNAE3_NONE_RESET)
|
||||
hdev->reset_type =
|
||||
hclgevf_get_reset_level(hdev, &hdev->reset_pending);
|
||||
if (hdev->reset_type != HNAE3_NONE_RESET)
|
||||
hclgevf_reset(hdev);
|
||||
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
|
||||
&hdev->reset_state)) {
|
||||
|
|
|
@ -113,7 +113,8 @@ enum e1000_boards {
|
|||
board_pch2lan,
|
||||
board_pch_lpt,
|
||||
board_pch_spt,
|
||||
board_pch_cnp
|
||||
board_pch_cnp,
|
||||
board_pch_tgp
|
||||
};
|
||||
|
||||
struct e1000_ps_page {
|
||||
|
@ -499,6 +500,7 @@ extern const struct e1000_info e1000_pch2_info;
|
|||
extern const struct e1000_info e1000_pch_lpt_info;
|
||||
extern const struct e1000_info e1000_pch_spt_info;
|
||||
extern const struct e1000_info e1000_pch_cnp_info;
|
||||
extern const struct e1000_info e1000_pch_tgp_info;
|
||||
extern const struct e1000_info e1000_es2_info;
|
||||
|
||||
void e1000e_ptp_init(struct e1000_adapter *adapter);
|
||||
|
|
|
@ -4813,7 +4813,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
|
|||
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_mac_info *mac = &hw->mac;
|
||||
u32 ctrl_ext, txdctl, snoop;
|
||||
u32 ctrl_ext, txdctl, snoop, fflt_dbg;
|
||||
s32 ret_val;
|
||||
u16 i;
|
||||
|
||||
|
@ -4872,6 +4872,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
|
|||
snoop = (u32)~(PCIE_NO_SNOOP_ALL);
|
||||
e1000e_set_pcie_no_snoop(hw, snoop);
|
||||
|
||||
/* Enable workaround for packet loss issue on TGP PCH
|
||||
* Do not gate DMA clock from the modPHY block
|
||||
*/
|
||||
if (mac->type >= e1000_pch_tgp) {
|
||||
fflt_dbg = er32(FFLT_DBG);
|
||||
fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
|
||||
ew32(FFLT_DBG, fflt_dbg);
|
||||
}
|
||||
|
||||
ctrl_ext = er32(CTRL_EXT);
|
||||
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
|
||||
ew32(CTRL_EXT, ctrl_ext);
|
||||
|
@ -5992,3 +6001,23 @@ const struct e1000_info e1000_pch_cnp_info = {
|
|||
.phy_ops = &ich8_phy_ops,
|
||||
.nvm_ops = &spt_nvm_ops,
|
||||
};
|
||||
|
||||
const struct e1000_info e1000_pch_tgp_info = {
|
||||
.mac = e1000_pch_tgp,
|
||||
.flags = FLAG_IS_ICH
|
||||
| FLAG_HAS_WOL
|
||||
| FLAG_HAS_HW_TIMESTAMP
|
||||
| FLAG_HAS_CTRLEXT_ON_LOAD
|
||||
| FLAG_HAS_AMT
|
||||
| FLAG_HAS_FLASH
|
||||
| FLAG_HAS_JUMBO_FRAMES
|
||||
| FLAG_APME_IN_WUC,
|
||||
.flags2 = FLAG2_HAS_PHY_STATS
|
||||
| FLAG2_HAS_EEE,
|
||||
.pba = 26,
|
||||
.max_hw_frame_size = 9022,
|
||||
.get_variants = e1000_get_variants_ich8lan,
|
||||
.mac_ops = &ich8_mac_ops,
|
||||
.phy_ops = &ich8_phy_ops,
|
||||
.nvm_ops = &spt_nvm_ops,
|
||||
};
|
||||
|
|
|
@ -289,6 +289,9 @@
|
|||
/* Proprietary Latency Tolerance Reporting PCI Capability */
|
||||
#define E1000_PCI_LTR_CAP_LPT 0xA8
|
||||
|
||||
/* Don't gate wake DMA clock */
|
||||
#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK 0x1000
|
||||
|
||||
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
|
||||
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
|
||||
bool state);
|
||||
|
|
|
@ -51,6 +51,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
|
|||
[board_pch_lpt] = &e1000_pch_lpt_info,
|
||||
[board_pch_spt] = &e1000_pch_spt_info,
|
||||
[board_pch_cnp] = &e1000_pch_cnp_info,
|
||||
[board_pch_tgp] = &e1000_pch_tgp_info,
|
||||
};
|
||||
|
||||
struct e1000_reg_info {
|
||||
|
@ -7896,28 +7897,28 @@ static const struct pci_device_id e1000_pci_tbl[] = {
|
|||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
|
||||
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
|
||||
|
||||
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
|
||||
};
|
||||
|
|
|
@ -25,6 +25,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
|
|||
case ICE_DEV_ID_E810C_BACKPLANE:
|
||||
case ICE_DEV_ID_E810C_QSFP:
|
||||
case ICE_DEV_ID_E810C_SFP:
|
||||
case ICE_DEV_ID_E810_XXV_BACKPLANE:
|
||||
case ICE_DEV_ID_E810_XXV_QSFP:
|
||||
case ICE_DEV_ID_E810_XXV_SFP:
|
||||
hw->mac_type = ICE_MAC_E810;
|
||||
break;
|
||||
|
|
|
@ -21,6 +21,10 @@
|
|||
#define ICE_DEV_ID_E810C_QSFP 0x1592
|
||||
/* Intel(R) Ethernet Controller E810-C for SFP */
|
||||
#define ICE_DEV_ID_E810C_SFP 0x1593
|
||||
/* Intel(R) Ethernet Controller E810-XXV for backplane */
|
||||
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
|
||||
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
|
||||
#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
|
||||
/* Intel(R) Ethernet Controller E810-XXV for SFP */
|
||||
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
|
||||
/* Intel(R) Ethernet Connection E823-C for backplane */
|
||||
|
|
|
@ -63,7 +63,8 @@ static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
|
|||
{
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
|
||||
snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
|
||||
snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
|
||||
hw->api_min_ver, hw->api_patch);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1668,7 +1668,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
|
|||
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
|
||||
if (hw->tnl.tbl[i].valid &&
|
||||
hw->tnl.tbl[i].type == type &&
|
||||
idx--)
|
||||
idx-- == 0)
|
||||
return i;
|
||||
|
||||
WARN_ON_ONCE(1);
|
||||
|
@ -1828,7 +1828,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
|
|||
u16 index;
|
||||
|
||||
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
|
||||
index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
|
||||
index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
|
||||
|
||||
status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
|
||||
if (status) {
|
||||
|
|
|
@ -2841,6 +2841,7 @@ void ice_napi_del(struct ice_vsi *vsi)
|
|||
*/
|
||||
int ice_vsi_release(struct ice_vsi *vsi)
|
||||
{
|
||||
enum ice_status err;
|
||||
struct ice_pf *pf;
|
||||
|
||||
if (!vsi->back)
|
||||
|
@ -2912,6 +2913,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
|
|||
|
||||
ice_fltr_remove_all(vsi);
|
||||
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
|
||||
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
|
||||
if (err)
|
||||
dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
|
||||
vsi->vsi_num, err);
|
||||
ice_vsi_delete(vsi);
|
||||
ice_vsi_free_q_vectors(vsi);
|
||||
|
||||
|
@ -3092,6 +3097,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
|
|||
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
|
||||
|
||||
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
|
||||
ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
|
||||
if (ret)
|
||||
dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
|
||||
vsi->vsi_num, ret);
|
||||
ice_vsi_free_q_vectors(vsi);
|
||||
|
||||
/* SR-IOV determines needed MSIX resources all at once instead of per
|
||||
|
|
|
@ -4224,6 +4224,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
|
|||
if (!pf)
|
||||
return -ENOMEM;
|
||||
|
||||
/* initialize Auxiliary index to invalid value */
|
||||
pf->aux_idx = -1;
|
||||
|
||||
/* set up for high or low DMA */
|
||||
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
|
||||
if (err)
|
||||
|
@ -4615,7 +4618,8 @@ static void ice_remove(struct pci_dev *pdev)
|
|||
|
||||
ice_aq_cancel_waiting_tasks(pf);
|
||||
ice_unplug_aux_dev(pf);
|
||||
ida_free(&ice_aux_ida, pf->aux_idx);
|
||||
if (pf->aux_idx >= 0)
|
||||
ida_free(&ice_aux_ida, pf->aux_idx);
|
||||
set_bit(ICE_DOWN, pf->state);
|
||||
|
||||
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
|
||||
|
@ -5016,6 +5020,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
|
|||
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
|
||||
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
|
||||
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
|
||||
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
|
||||
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
|
||||
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
|
||||
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
|
||||
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
|
||||
|
|
|
@ -2070,6 +2070,19 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
|
|||
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
|
||||
* @pi: port information structure
|
||||
* @vsi_handle: software VSI handle
|
||||
*
|
||||
* This function clears the VSI and its RDMA children nodes from scheduler tree
|
||||
* for all TCs.
|
||||
*/
|
||||
enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
|
||||
{
|
||||
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_get_agg_info - get the aggregator ID
|
||||
* @hw: pointer to the hardware structure
|
||||
|
|
|
@ -89,6 +89,7 @@ enum ice_status
|
|||
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
|
||||
u8 owner, bool enable);
|
||||
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
|
||||
enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
|
||||
|
||||
/* Tx scheduler rate limiter functions */
|
||||
enum ice_status
|
||||
|
|
|
@ -22,8 +22,8 @@
|
|||
#define IGC_DEV_ID_I220_V 0x15F7
|
||||
#define IGC_DEV_ID_I225_K 0x3100
|
||||
#define IGC_DEV_ID_I225_K2 0x3101
|
||||
#define IGC_DEV_ID_I226_K 0x3102
|
||||
#define IGC_DEV_ID_I225_LMVP 0x5502
|
||||
#define IGC_DEV_ID_I226_K 0x5504
|
||||
#define IGC_DEV_ID_I225_IT 0x0D9F
|
||||
#define IGC_DEV_ID_I226_LM 0x125B
|
||||
#define IGC_DEV_ID_I226_V 0x125C
|
||||
|
|
|
@ -199,6 +199,9 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
|
|||
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
|
||||
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
|
||||
|
||||
int mlx5e_fs_init(struct mlx5e_priv *priv);
|
||||
void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
|
||||
|
||||
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
|
||||
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
|
||||
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
|
||||
|
|
|
@ -10,6 +10,8 @@
|
|||
#include "en_tc.h"
|
||||
#include "rep/tc.h"
|
||||
#include "rep/neigh.h"
|
||||
#include "lag.h"
|
||||
#include "lag_mp.h"
|
||||
|
||||
struct mlx5e_tc_tun_route_attr {
|
||||
struct net_device *out_dev;
|
||||
|
|
|
@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
|
|||
* Pkt: MAC IP ESP IP L4
|
||||
*
|
||||
* Transport Mode:
|
||||
* SWP: OutL3 InL4
|
||||
* InL3
|
||||
* SWP: OutL3 OutL4
|
||||
* Pkt: MAC IP ESP L4
|
||||
*
|
||||
* Tunnel(VXLAN TCP/UDP) over Transport Mode
|
||||
|
@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
|
|||
return;
|
||||
|
||||
if (!xo->inner_ipproto) {
|
||||
eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
|
||||
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
|
||||
if (skb->protocol == htons(ETH_P_IPV6))
|
||||
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
|
||||
if (xo->proto == IPPROTO_UDP)
|
||||
switch (xo->proto) {
|
||||
case IPPROTO_UDP:
|
||||
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
|
||||
fallthrough;
|
||||
case IPPROTO_TCP:
|
||||
/* IP | ESP | TCP */
|
||||
eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
/* Tunnel(VXLAN TCP/UDP) over Transport Mode */
|
||||
switch (xo->inner_ipproto) {
|
||||
case IPPROTO_UDP:
|
||||
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
|
||||
return;
|
||||
fallthrough;
|
||||
case IPPROTO_TCP:
|
||||
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
|
||||
eseg->swp_inner_l4_offset =
|
||||
(skb->csum_start + skb->head - skb->data) / 2;
|
||||
if (skb->protocol == htons(ETH_P_IPV6))
|
||||
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Tunnel(VXLAN TCP/UDP) over Transport Mode */
|
||||
switch (xo->inner_ipproto) {
|
||||
case IPPROTO_UDP:
|
||||
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
|
||||
fallthrough;
|
||||
case IPPROTO_TCP:
|
||||
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
|
||||
eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
|
||||
if (skb->protocol == htons(ETH_P_IPV6))
|
||||
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
|
||||
|
|
|
@ -1186,10 +1186,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
|
|||
struct mlx5e_flow_table *ft;
|
||||
int err;
|
||||
|
||||
priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
|
||||
if (!priv->fs.vlan)
|
||||
return -ENOMEM;
|
||||
|
||||
ft = &priv->fs.vlan->ft;
|
||||
ft->num_groups = 0;
|
||||
|
||||
|
@ -1198,10 +1194,8 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
|
|||
ft_attr.prio = MLX5E_NIC_PRIO;
|
||||
|
||||
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
|
||||
if (IS_ERR(ft->t)) {
|
||||
err = PTR_ERR(ft->t);
|
||||
goto err_free_t;
|
||||
}
|
||||
if (IS_ERR(ft->t))
|
||||
return PTR_ERR(ft->t);
|
||||
|
||||
ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
|
||||
if (!ft->g) {
|
||||
|
@ -1221,9 +1215,6 @@ err_free_g:
|
|||
kfree(ft->g);
|
||||
err_destroy_vlan_table:
|
||||
mlx5_destroy_flow_table(ft->t);
|
||||
err_free_t:
|
||||
kvfree(priv->fs.vlan);
|
||||
priv->fs.vlan = NULL;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -1232,7 +1223,6 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
|
|||
{
|
||||
mlx5e_del_vlan_rules(priv);
|
||||
mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
|
||||
kvfree(priv->fs.vlan);
|
||||
}
|
||||
|
||||
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
|
||||
|
@ -1351,3 +1341,17 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
|
|||
mlx5e_arfs_destroy_tables(priv);
|
||||
mlx5e_ethtool_cleanup_steering(priv);
|
||||
}
|
||||
|
||||
int mlx5e_fs_init(struct mlx5e_priv *priv)
|
||||
{
|
||||
priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
|
||||
if (!priv->fs.vlan)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
|
||||
{
|
||||
kvfree(priv->fs.vlan);
|
||||
priv->fs.vlan = NULL;
|
||||
}
|
||||
|
|
|
@ -4578,6 +4578,12 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
|
|||
|
||||
mlx5e_timestamp_init(priv);
|
||||
|
||||
err = mlx5e_fs_init(priv);
|
||||
if (err) {
|
||||
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
err = mlx5e_ipsec_init(priv);
|
||||
if (err)
|
||||
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
|
||||
|
@ -4595,6 +4601,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
|
|||
mlx5e_health_destroy_reporters(priv);
|
||||
mlx5e_tls_cleanup(priv);
|
||||
mlx5e_ipsec_cleanup(priv);
|
||||
mlx5e_fs_cleanup(priv);
|
||||
}
|
||||
|
||||
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
|
||||
|
|
|
@ -67,6 +67,8 @@
|
|||
#include "lib/fs_chains.h"
|
||||
#include "diag/en_tc_tracepoint.h"
|
||||
#include <asm/div64.h>
|
||||
#include "lag.h"
|
||||
#include "lag_mp.h"
|
||||
|
||||
#define nic_chains(priv) ((priv)->fs.tc.chains)
|
||||
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
|
||||
|
|
|
@ -213,19 +213,18 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
|
|||
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
|
||||
}
|
||||
|
||||
/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
|
||||
* need to set L3 checksum flag for IPsec
|
||||
*/
|
||||
static void
|
||||
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
||||
struct mlx5_wqe_eth_seg *eseg)
|
||||
{
|
||||
struct xfrm_offload *xo = xfrm_offload(skb);
|
||||
|
||||
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
|
||||
if (skb->encapsulation) {
|
||||
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
|
||||
if (xo->inner_ipproto) {
|
||||
eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
|
||||
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
|
||||
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
|
||||
sq->stats->csum_partial_inner++;
|
||||
} else {
|
||||
sq->stats->csum_partial++;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -234,6 +233,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
|||
struct mlx5e_accel_tx_state *accel,
|
||||
struct mlx5_wqe_eth_seg *eseg)
|
||||
{
|
||||
if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
|
||||
ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
|
||||
return;
|
||||
}
|
||||
|
||||
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
|
||||
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
|
||||
if (skb->encapsulation) {
|
||||
|
@ -249,8 +253,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
|||
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
|
||||
sq->stats->csum_partial++;
|
||||
#endif
|
||||
} else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
|
||||
ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
|
||||
} else
|
||||
sq->stats->csum_none++;
|
||||
}
|
||||
|
|
|
@ -473,10 +473,9 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
|
|||
|
||||
err_min_rate:
|
||||
list_del(&group->list);
|
||||
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
|
||||
SCHEDULING_HIERARCHY_E_SWITCH,
|
||||
group->tsar_ix);
|
||||
if (err)
|
||||
if (mlx5_destroy_scheduling_element_cmd(esw->dev,
|
||||
SCHEDULING_HIERARCHY_E_SWITCH,
|
||||
group->tsar_ix))
|
||||
NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
|
||||
err_sched_elem:
|
||||
kfree(group);
|
||||
|
|
|
@ -442,6 +442,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
|
|||
if (!mlx5_lag_is_ready(ldev)) {
|
||||
do_bond = false;
|
||||
} else {
|
||||
/* VF LAG is in multipath mode, ignore bond change requests */
|
||||
if (mlx5_lag_is_multipath(dev0))
|
||||
return;
|
||||
|
||||
tracker = ldev->tracker;
|
||||
|
||||
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
|
||||
|
|
|
@ -9,20 +9,23 @@
|
|||
#include "eswitch.h"
|
||||
#include "lib/mlx5.h"
|
||||
|
||||
static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
|
||||
{
|
||||
return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
|
||||
}
|
||||
|
||||
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
|
||||
{
|
||||
if (!mlx5_lag_is_ready(ldev))
|
||||
return false;
|
||||
|
||||
if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
|
||||
return false;
|
||||
|
||||
return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
|
||||
ldev->pf[MLX5_LAG_P2].dev);
|
||||
}
|
||||
|
||||
static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
|
||||
{
|
||||
return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
|
||||
}
|
||||
|
||||
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
|
||||
{
|
||||
struct mlx5_lag *ldev;
|
||||
|
|
|
@ -24,12 +24,14 @@ struct lag_mp {
|
|||
void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
|
||||
int mlx5_lag_mp_init(struct mlx5_lag *ldev);
|
||||
void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
|
||||
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
|
||||
|
||||
#else /* CONFIG_MLX5_ESWITCH */
|
||||
|
||||
static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
|
||||
static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
|
||||
static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
|
||||
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }
|
||||
|
||||
#endif /* CONFIG_MLX5_ESWITCH */
|
||||
#endif /* __MLX5_LAG_MP_H__ */
|
||||
|
|
|
@ -758,6 +758,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
|
|||
err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
|
||||
"port %u: missing serdes\n",
|
||||
portno);
|
||||
of_node_put(portnp);
|
||||
goto cleanup_config;
|
||||
}
|
||||
config->portno = portno;
|
||||
|
|
|
@ -969,6 +969,7 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
|
|||
target = ocelot_regmap_init(ocelot, res);
|
||||
if (IS_ERR(target)) {
|
||||
err = PTR_ERR(target);
|
||||
of_node_put(portnp);
|
||||
goto out_teardown;
|
||||
}
|
||||
|
||||
|
|
|
@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
|
|||
}
|
||||
|
||||
reg->dst_lmextn = swreg_lmextn(dst);
|
||||
reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
|
||||
reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
|
|||
}
|
||||
|
||||
reg->dst_lmextn = swreg_lmextn(dst);
|
||||
reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
|
||||
reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -132,16 +132,27 @@ void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
|
|||
case MC_CMD_MEDIA_SFP_PLUS:
|
||||
case MC_CMD_MEDIA_QSFP_PLUS:
|
||||
SET_BIT(FIBRE);
|
||||
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
|
||||
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) {
|
||||
SET_BIT(1000baseT_Full);
|
||||
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
|
||||
SET_BIT(10000baseT_Full);
|
||||
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
|
||||
SET_BIT(1000baseX_Full);
|
||||
}
|
||||
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) {
|
||||
SET_BIT(10000baseCR_Full);
|
||||
SET_BIT(10000baseLR_Full);
|
||||
SET_BIT(10000baseSR_Full);
|
||||
}
|
||||
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
|
||||
SET_BIT(40000baseCR4_Full);
|
||||
if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
|
||||
SET_BIT(40000baseSR4_Full);
|
||||
}
|
||||
if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) {
|
||||
SET_BIT(100000baseCR4_Full);
|
||||
if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
|
||||
SET_BIT(100000baseSR4_Full);
|
||||
}
|
||||
if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) {
|
||||
SET_BIT(25000baseCR_Full);
|
||||
SET_BIT(25000baseSR_Full);
|
||||
}
|
||||
if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
|
||||
SET_BIT(50000baseCR2_Full);
|
||||
break;
|
||||
|
@ -192,15 +203,19 @@ u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
|
|||
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
|
||||
if (TEST_BIT(1000baseT_Half))
|
||||
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
|
||||
if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
|
||||
if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) ||
|
||||
TEST_BIT(1000baseX_Full))
|
||||
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
|
||||
if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
|
||||
if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) ||
|
||||
TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) ||
|
||||
TEST_BIT(10000baseSR_Full))
|
||||
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
|
||||
if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
|
||||
if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) ||
|
||||
TEST_BIT(40000baseSR4_Full))
|
||||
result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
|
||||
if (TEST_BIT(100000baseCR4_Full))
|
||||
if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full))
|
||||
result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
|
||||
if (TEST_BIT(25000baseCR_Full))
|
||||
if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full))
|
||||
result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
|
||||
if (TEST_BIT(50000baseCR2_Full))
|
||||
result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
|
||||
|
|
|
@ -648,7 +648,7 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
|
|||
} else if (rc == -EINVAL) {
|
||||
fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
|
||||
} else if (rc == -EPERM) {
|
||||
netif_info(efx, probe, efx->net_dev, "no PTP support\n");
|
||||
pci_info(efx->pci_dev, "no PTP support\n");
|
||||
return rc;
|
||||
} else {
|
||||
efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
|
||||
|
@ -824,7 +824,7 @@ static int efx_ptp_disable(struct efx_nic *efx)
|
|||
* should only have been called during probe.
|
||||
*/
|
||||
if (rc == -ENOSYS || rc == -EPERM)
|
||||
netif_info(efx, probe, efx->net_dev, "no PTP support\n");
|
||||
pci_info(efx->pci_dev, "no PTP support\n");
|
||||
else if (rc)
|
||||
efx_mcdi_display_error(efx, MC_CMD_PTP,
|
||||
MC_CMD_PTP_IN_DISABLE_LEN,
|
||||
|
|
|
@ -1057,7 +1057,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
|
|||
return;
|
||||
|
||||
if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
|
||||
netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
|
||||
pci_info(efx->pci_dev, "no SR-IOV VFs probed\n");
|
||||
return;
|
||||
}
|
||||
if (count > 0 && count > max_vfs)
|
||||
|
|
|
@ -736,7 +736,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
|
|||
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
|
||||
ptp_v2 = PTP_TCR_TSVER2ENA;
|
||||
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
|
||||
if (priv->synopsys_id != DWMAC_CORE_5_10)
|
||||
if (priv->synopsys_id < DWMAC_CORE_4_10)
|
||||
ts_event_en = PTP_TCR_TSEVNTENA;
|
||||
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
|
||||
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
|
||||
|
|
|
@ -623,16 +623,16 @@ static int receive(struct net_device *dev, int cnt)
|
|||
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
||||
#ifdef __i386__
|
||||
#if defined(__i386__) && !defined(CONFIG_UML)
|
||||
#include <asm/msr.h>
|
||||
#define GETTICK(x) \
|
||||
({ \
|
||||
if (boot_cpu_has(X86_FEATURE_TSC)) \
|
||||
x = (unsigned int)rdtsc(); \
|
||||
})
|
||||
#else /* __i386__ */
|
||||
#else /* __i386__ && !CONFIG_UML */
|
||||
#define GETTICK(x)
|
||||
#endif /* __i386__ */
|
||||
#endif /* __i386__ && !CONFIG_UML */
|
||||
|
||||
static void epp_bh(struct work_struct *work)
|
||||
{
|
||||
|
|
|
@ -117,6 +117,7 @@ config USB_LAN78XX
|
|||
select PHYLIB
|
||||
select MICROCHIP_PHY
|
||||
select FIXED_PHY
|
||||
select CRC32
|
||||
help
|
||||
This option adds support for Microchip LAN78XX based USB 2
|
||||
& USB 3 10/100/1000 Ethernet adapters.
|
||||
|
|
|
@ -1788,6 +1788,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
|
|||
if (!dev->rx_urb_size)
|
||||
dev->rx_urb_size = dev->hard_mtu;
|
||||
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
|
||||
if (dev->maxpacket == 0) {
|
||||
/* that is a broken device */
|
||||
goto out4;
|
||||
}
|
||||
|
||||
/* let userspace know we have a random address */
|
||||
if (ether_addr_equal(net->dev_addr, node_id))
|
||||
|
|
|
@ -1360,8 +1360,6 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
|
|||
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
|
||||
bool is_ndisc = ipv6_ndisc_frame(skb);
|
||||
|
||||
nf_reset_ct(skb);
|
||||
|
||||
/* loopback, multicast & non-ND link-local traffic; do not push through
|
||||
* packet taps again. Reset pkt_type for upper layers to process skb.
|
||||
* For strict packets with a source LLA, determine the dst using the
|
||||
|
@ -1424,8 +1422,6 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
|
|||
skb->skb_iif = vrf_dev->ifindex;
|
||||
IPCB(skb)->flags |= IPSKB_L3SLAVE;
|
||||
|
||||
nf_reset_ct(skb);
|
||||
|
||||
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
|
||||
goto out;
|
||||
|
||||
|
|
|
@ -1226,11 +1226,9 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
|
|||
&reset_cmd,
|
||||
ST95HF_RESET_CMD_LEN,
|
||||
ASYNC);
|
||||
if (result) {
|
||||
if (result)
|
||||
dev_err(&spictx->spidev->dev,
|
||||
"ST95HF reset failed in remove() err = %d\n", result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/* wait for 3 ms to complete the controller reset process */
|
||||
usleep_range(3000, 4000);
|
||||
|
@ -1239,7 +1237,7 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
|
|||
if (stcontext->st95hf_supply)
|
||||
regulator_disable(stcontext->st95hf_supply);
|
||||
|
||||
return result;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Register as SPI protocol driver */
|
||||
|
|
|
@ -170,6 +170,7 @@ static void ptp_clock_release(struct device *dev)
|
|||
struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
|
||||
|
||||
ptp_cleanup_pin_groups(ptp);
|
||||
kfree(ptp->vclock_index);
|
||||
mutex_destroy(&ptp->tsevq_mux);
|
||||
mutex_destroy(&ptp->pincfg_mux);
|
||||
mutex_destroy(&ptp->n_vclocks_mux);
|
||||
|
@ -283,15 +284,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
|
|||
/* Create a posix clock and link it to the device. */
|
||||
err = posix_clock_register(&ptp->clock, &ptp->dev);
|
||||
if (err) {
|
||||
if (ptp->pps_source)
|
||||
pps_unregister_source(ptp->pps_source);
|
||||
|
||||
if (ptp->kworker)
|
||||
kthread_destroy_worker(ptp->kworker);
|
||||
|
||||
put_device(&ptp->dev);
|
||||
|
||||
pr_err("failed to create posix clock\n");
|
||||
goto no_clock;
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
return ptp;
|
||||
|
||||
no_clock:
|
||||
if (ptp->pps_source)
|
||||
pps_unregister_source(ptp->pps_source);
|
||||
no_pps:
|
||||
ptp_cleanup_pin_groups(ptp);
|
||||
no_pin_groups:
|
||||
|
@ -321,8 +327,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
|
|||
ptp->defunct = 1;
|
||||
wake_up_interruptible(&ptp->tsev_wq);
|
||||
|
||||
kfree(ptp->vclock_index);
|
||||
|
||||
if (ptp->kworker) {
|
||||
kthread_cancel_delayed_work_sync(&ptp->aux_work);
|
||||
kthread_destroy_worker(ptp->kworker);
|
||||
|
|
|
@ -31,10 +31,10 @@ int kvm_arch_ptp_init(void)
|
|||
|
||||
ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
|
||||
KVM_CLOCK_PAIRING_WALLCLOCK);
|
||||
if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
|
||||
if (ret == -KVM_ENOSYS)
|
||||
return -ENODEV;
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int kvm_arch_ptp_get_clock(struct timespec64 *ts)
|
||||
|
|
|
@ -1138,7 +1138,6 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
|
|||
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
|
||||
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
|
||||
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
|
||||
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
|
||||
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
|
||||
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
|
||||
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
|
||||
|
|
|
@ -54,7 +54,7 @@ struct mctp_sock {
|
|||
struct sock sk;
|
||||
|
||||
/* bind() params */
|
||||
int bind_net;
|
||||
unsigned int bind_net;
|
||||
mctp_eid_t bind_addr;
|
||||
__u8 bind_type;
|
||||
|
||||
|
|
|
@ -384,11 +384,11 @@ sctp_vtag_verify(const struct sctp_chunk *chunk,
|
|||
* Verification Tag value does not match the receiver's own
|
||||
* tag value, the receiver shall silently discard the packet...
|
||||
*/
|
||||
if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
|
||||
return 1;
|
||||
if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag)
|
||||
return 0;
|
||||
|
||||
chunk->transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Check VTAG of the packet matches the sender's own tag and the T bit is
|
||||
|
|
|
@ -1576,6 +1576,7 @@ struct tcp_md5sig_key {
|
|||
u8 keylen;
|
||||
u8 family; /* AF_INET or AF_INET6 */
|
||||
u8 prefixlen;
|
||||
u8 flags;
|
||||
union tcp_md5_addr addr;
|
||||
int l3index; /* set if key added with L3 scope */
|
||||
u8 key[TCP_MD5SIG_MAXKEYLEN];
|
||||
|
@ -1621,10 +1622,10 @@ struct tcp_md5sig_pool {
|
|||
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
|
||||
const struct sock *sk, const struct sk_buff *skb);
|
||||
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
|
||||
int family, u8 prefixlen, int l3index,
|
||||
int family, u8 prefixlen, int l3index, u8 flags,
|
||||
const u8 *newkey, u8 newkeylen, gfp_t gfp);
|
||||
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
|
||||
int family, u8 prefixlen, int l3index);
|
||||
int family, u8 prefixlen, int l3index, u8 flags);
|
||||
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
|
||||
const struct sock *addr_sk);
|
||||
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#define __UAPI_MCTP_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/socket.h>
|
||||
|
||||
typedef __u8 mctp_eid_t;
|
||||
|
||||
|
@ -18,11 +19,13 @@ struct mctp_addr {
|
|||
};
|
||||
|
||||
struct sockaddr_mctp {
|
||||
unsigned short int smctp_family;
|
||||
int smctp_network;
|
||||
__kernel_sa_family_t smctp_family;
|
||||
__u16 __smctp_pad0;
|
||||
unsigned int smctp_network;
|
||||
struct mctp_addr smctp_addr;
|
||||
__u8 smctp_type;
|
||||
__u8 smctp_tag;
|
||||
__u8 __smctp_pad1;
|
||||
};
|
||||
|
||||
#define MCTP_NET_ANY 0x0
|
||||
|
|
|
@ -1125,9 +1125,7 @@ static inline unsigned long br_multicast_lmqt(const struct net_bridge_mcast *brm
|
|||
|
||||
static inline unsigned long br_multicast_gmi(const struct net_bridge_mcast *brmctx)
|
||||
{
|
||||
/* use the RFC default of 2 for QRV */
|
||||
return 2 * brmctx->multicast_query_interval +
|
||||
brmctx->multicast_query_response_interval;
|
||||
return brmctx->multicast_membership_interval;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
|
|
|
@ -926,7 +926,9 @@ static int translate_table(struct net *net, const char *name,
|
|||
return -ENOMEM;
|
||||
for_each_possible_cpu(i) {
|
||||
newinfo->chainstack[i] =
|
||||
vmalloc(array_size(udc_cnt, sizeof(*(newinfo->chainstack[0]))));
|
||||
vmalloc_node(array_size(udc_cnt,
|
||||
sizeof(*(newinfo->chainstack[0]))),
|
||||
cpu_to_node(i));
|
||||
if (!newinfo->chainstack[i]) {
|
||||
while (i)
|
||||
vfree(newinfo->chainstack[--i]);
|
||||
|
|
|
@ -121,7 +121,7 @@ enum {
|
|||
struct tpcon {
|
||||
int idx;
|
||||
int len;
|
||||
u8 state;
|
||||
u32 state;
|
||||
u8 bs;
|
||||
u8 sn;
|
||||
u8 ll_dl;
|
||||
|
@ -848,6 +848,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
|||
{
|
||||
struct sock *sk = sock->sk;
|
||||
struct isotp_sock *so = isotp_sk(sk);
|
||||
u32 old_state = so->tx.state;
|
||||
struct sk_buff *skb;
|
||||
struct net_device *dev;
|
||||
struct canfd_frame *cf;
|
||||
|
@ -860,45 +861,55 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
|||
return -EADDRNOTAVAIL;
|
||||
|
||||
/* we do not support multiple buffers - for now */
|
||||
if (so->tx.state != ISOTP_IDLE || wq_has_sleeper(&so->wait)) {
|
||||
if (msg->msg_flags & MSG_DONTWAIT)
|
||||
return -EAGAIN;
|
||||
if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
|
||||
wq_has_sleeper(&so->wait)) {
|
||||
if (msg->msg_flags & MSG_DONTWAIT) {
|
||||
err = -EAGAIN;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* wait for complete transmission of current pdu */
|
||||
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
|
||||
err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
|
||||
if (err)
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
if (!size || size > MAX_MSG_LENGTH)
|
||||
return -EINVAL;
|
||||
if (!size || size > MAX_MSG_LENGTH) {
|
||||
err = -EINVAL;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* take care of a potential SF_DL ESC offset for TX_DL > 8 */
|
||||
off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
|
||||
|
||||
/* does the given data fit into a single frame for SF_BROADCAST? */
|
||||
if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
|
||||
(size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off))
|
||||
return -EINVAL;
|
||||
(size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
|
||||
err = -EINVAL;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
err = memcpy_from_msg(so->tx.buf, msg, size);
|
||||
if (err < 0)
|
||||
return err;
|
||||
goto err_out;
|
||||
|
||||
dev = dev_get_by_index(sock_net(sk), so->ifindex);
|
||||
if (!dev)
|
||||
return -ENXIO;
|
||||
if (!dev) {
|
||||
err = -ENXIO;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
|
||||
msg->msg_flags & MSG_DONTWAIT, &err);
|
||||
if (!skb) {
|
||||
dev_put(dev);
|
||||
return err;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
can_skb_reserve(skb);
|
||||
can_skb_prv(skb)->ifindex = dev->ifindex;
|
||||
can_skb_prv(skb)->skbcnt = 0;
|
||||
|
||||
so->tx.state = ISOTP_SENDING;
|
||||
so->tx.len = size;
|
||||
so->tx.idx = 0;
|
||||
|
||||
|
@ -954,15 +965,25 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
|||
if (err) {
|
||||
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
|
||||
__func__, ERR_PTR(err));
|
||||
return err;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
if (wait_tx_done) {
|
||||
/* wait for complete transmission of current pdu */
|
||||
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
|
||||
|
||||
if (sk->sk_err)
|
||||
return -sk->sk_err;
|
||||
}
|
||||
|
||||
return size;
|
||||
|
||||
err_out:
|
||||
so->tx.state = old_state;
|
||||
if (so->tx.state == ISOTP_IDLE)
|
||||
wake_up_interruptible(&so->wait);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
|
||||
|
|
|
@ -330,6 +330,7 @@ int j1939_session_activate(struct j1939_session *session);
|
|||
void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec);
|
||||
void j1939_session_timers_cancel(struct j1939_session *session);
|
||||
|
||||
#define J1939_MIN_TP_PACKET_SIZE 9
|
||||
#define J1939_MAX_TP_PACKET_SIZE (7 * 0xff)
|
||||
#define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff)
|
||||
|
||||
|
|
|
@ -249,11 +249,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
|
|||
struct j1939_priv *priv, *priv_new;
|
||||
int ret;
|
||||
|
||||
priv = j1939_priv_get_by_ndev(ndev);
|
||||
spin_lock(&j1939_netdev_lock);
|
||||
priv = j1939_priv_get_by_ndev_locked(ndev);
|
||||
if (priv) {
|
||||
kref_get(&priv->rx_kref);
|
||||
spin_unlock(&j1939_netdev_lock);
|
||||
return priv;
|
||||
}
|
||||
spin_unlock(&j1939_netdev_lock);
|
||||
|
||||
priv = j1939_priv_create(ndev);
|
||||
if (!priv)
|
||||
|
@ -269,10 +272,10 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
|
|||
/* Someone was faster than us, use their priv and roll
|
||||
* back our's.
|
||||
*/
|
||||
kref_get(&priv_new->rx_kref);
|
||||
spin_unlock(&j1939_netdev_lock);
|
||||
dev_put(ndev);
|
||||
kfree(priv);
|
||||
kref_get(&priv_new->rx_kref);
|
||||
return priv_new;
|
||||
}
|
||||
j1939_priv_set(ndev, priv);
|
||||
|
|
|
@ -1237,12 +1237,11 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
|
|||
session->err = -ETIME;
|
||||
j1939_session_deactivate(session);
|
||||
} else {
|
||||
netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
|
||||
__func__, session);
|
||||
|
||||
j1939_session_list_lock(session->priv);
|
||||
if (session->state >= J1939_SESSION_ACTIVE &&
|
||||
session->state < J1939_SESSION_ACTIVE_MAX) {
|
||||
netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
|
||||
__func__, session);
|
||||
j1939_session_get(session);
|
||||
hrtimer_start(&session->rxtimer,
|
||||
ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
|
||||
|
@ -1609,6 +1608,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
|
|||
abort = J1939_XTP_ABORT_FAULT;
|
||||
else if (len > priv->tp_max_packet_size)
|
||||
abort = J1939_XTP_ABORT_RESOURCE;
|
||||
else if (len < J1939_MIN_TP_PACKET_SIZE)
|
||||
abort = J1939_XTP_ABORT_FAULT;
|
||||
}
|
||||
|
||||
if (abort != J1939_XTP_NO_ABORT) {
|
||||
|
@ -1789,6 +1790,7 @@ static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
|
|||
static void j1939_xtp_rx_dat_one(struct j1939_session *session,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
enum j1939_xtp_abort abort = J1939_XTP_ABORT_FAULT;
|
||||
struct j1939_priv *priv = session->priv;
|
||||
struct j1939_sk_buff_cb *skcb, *se_skcb;
|
||||
struct sk_buff *se_skb = NULL;
|
||||
|
@ -1803,9 +1805,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
|
|||
|
||||
skcb = j1939_skb_to_cb(skb);
|
||||
dat = skb->data;
|
||||
if (skb->len <= 1)
|
||||
if (skb->len != 8) {
|
||||
/* makes no sense */
|
||||
abort = J1939_XTP_ABORT_UNEXPECTED_DATA;
|
||||
goto out_session_cancel;
|
||||
}
|
||||
|
||||
switch (session->last_cmd) {
|
||||
case 0xff:
|
||||
|
@ -1904,7 +1908,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
|
|||
out_session_cancel:
|
||||
kfree_skb(se_skb);
|
||||
j1939_session_timers_cancel(session);
|
||||
j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
|
||||
j1939_session_cancel(session, abort);
|
||||
j1939_session_put(session);
|
||||
}
|
||||
|
||||
|
|
|
@ -1374,12 +1374,15 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
|
|||
|
||||
for_each_available_child_of_node(ports, port) {
|
||||
err = of_property_read_u32(port, "reg", ®);
|
||||
if (err)
|
||||
if (err) {
|
||||
of_node_put(port);
|
||||
goto out_put_node;
|
||||
}
|
||||
|
||||
if (reg >= ds->num_ports) {
|
||||
dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
|
||||
port, reg, ds->num_ports);
|
||||
of_node_put(port);
|
||||
err = -EINVAL;
|
||||
goto out_put_node;
|
||||
}
|
||||
|
@ -1387,8 +1390,10 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
|
|||
dp = dsa_to_port(ds, reg);
|
||||
|
||||
err = dsa_port_parse_of(dp, port);
|
||||
if (err)
|
||||
if (err) {
|
||||
of_node_put(port);
|
||||
goto out_put_node;
|
||||
}
|
||||
}
|
||||
|
||||
out_put_node:
|
||||
|
|
|
@ -1037,6 +1037,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
|
|||
DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
|
||||
EXPORT_SYMBOL(tcp_md5_needed);
|
||||
|
||||
static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
|
||||
{
|
||||
if (!old)
|
||||
return true;
|
||||
|
||||
/* l3index always overrides non-l3index */
|
||||
if (old->l3index && new->l3index == 0)
|
||||
return false;
|
||||
if (old->l3index == 0 && new->l3index)
|
||||
return true;
|
||||
|
||||
return old->prefixlen < new->prefixlen;
|
||||
}
|
||||
|
||||
/* Find the Key structure for an address. */
|
||||
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
|
||||
const union tcp_md5_addr *addr,
|
||||
|
@ -1059,7 +1073,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
|
|||
lockdep_sock_is_held(sk)) {
|
||||
if (key->family != family)
|
||||
continue;
|
||||
if (key->l3index && key->l3index != l3index)
|
||||
if (key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index)
|
||||
continue;
|
||||
if (family == AF_INET) {
|
||||
mask = inet_make_mask(key->prefixlen);
|
||||
|
@ -1074,8 +1088,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
|
|||
match = false;
|
||||
}
|
||||
|
||||
if (match && (!best_match ||
|
||||
key->prefixlen > best_match->prefixlen))
|
||||
if (match && better_md5_match(best_match, key))
|
||||
best_match = key;
|
||||
}
|
||||
return best_match;
|
||||
|
@ -1085,7 +1098,7 @@ EXPORT_SYMBOL(__tcp_md5_do_lookup);
|
|||
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
|
||||
const union tcp_md5_addr *addr,
|
||||
int family, u8 prefixlen,
|
||||
int l3index)
|
||||
int l3index, u8 flags)
|
||||
{
|
||||
const struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct tcp_md5sig_key *key;
|
||||
|
@ -1105,7 +1118,9 @@ static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
|
|||
lockdep_sock_is_held(sk)) {
|
||||
if (key->family != family)
|
||||
continue;
|
||||
if (key->l3index && key->l3index != l3index)
|
||||
if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
|
||||
continue;
|
||||
if (key->l3index != l3index)
|
||||
continue;
|
||||
if (!memcmp(&key->addr, addr, size) &&
|
||||
key->prefixlen == prefixlen)
|
||||
|
@ -1129,7 +1144,7 @@ EXPORT_SYMBOL(tcp_v4_md5_lookup);
|
|||
|
||||
/* This can be called on a newly created socket, from other files */
|
||||
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
|
||||
int family, u8 prefixlen, int l3index,
|
||||
int family, u8 prefixlen, int l3index, u8 flags,
|
||||
const u8 *newkey, u8 newkeylen, gfp_t gfp)
|
||||
{
|
||||
/* Add Key to the list */
|
||||
|
@ -1137,7 +1152,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
|
|||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct tcp_md5sig_info *md5sig;
|
||||
|
||||
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
|
||||
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
|
||||
if (key) {
|
||||
/* Pre-existing entry - just update that one.
|
||||
* Note that the key might be used concurrently.
|
||||
|
@ -1182,6 +1197,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
|
|||
key->family = family;
|
||||
key->prefixlen = prefixlen;
|
||||
key->l3index = l3index;
|
||||
key->flags = flags;
|
||||
memcpy(&key->addr, addr,
|
||||
(family == AF_INET6) ? sizeof(struct in6_addr) :
|
||||
sizeof(struct in_addr));
|
||||
|
@ -1191,11 +1207,11 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
|
|||
EXPORT_SYMBOL(tcp_md5_do_add);
|
||||
|
||||
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
|
||||
u8 prefixlen, int l3index)
|
||||
u8 prefixlen, int l3index, u8 flags)
|
||||
{
|
||||
struct tcp_md5sig_key *key;
|
||||
|
||||
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
|
||||
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
|
||||
if (!key)
|
||||
return -ENOENT;
|
||||
hlist_del_rcu(&key->node);
|
||||
|
@ -1229,6 +1245,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
|
|||
const union tcp_md5_addr *addr;
|
||||
u8 prefixlen = 32;
|
||||
int l3index = 0;
|
||||
u8 flags;
|
||||
|
||||
if (optlen < sizeof(cmd))
|
||||
return -EINVAL;
|
||||
|
@ -1239,6 +1256,8 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
|
|||
if (sin->sin_family != AF_INET)
|
||||
return -EINVAL;
|
||||
|
||||
flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
|
||||
|
||||
if (optname == TCP_MD5SIG_EXT &&
|
||||
cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
|
||||
prefixlen = cmd.tcpm_prefixlen;
|
||||
|
@ -1246,7 +1265,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (optname == TCP_MD5SIG_EXT &&
|
||||
if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
|
||||
cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
|
||||
struct net_device *dev;
|
||||
|
||||
|
@ -1267,12 +1286,12 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
|
|||
addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
|
||||
|
||||
if (!cmd.tcpm_keylen)
|
||||
return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
|
||||
return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
|
||||
|
||||
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
|
||||
return -EINVAL;
|
||||
|
||||
return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
|
||||
return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
|
||||
cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
|
||||
}
|
||||
|
||||
|
@ -1596,7 +1615,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
|
|||
* memory, then we end up not copying the key
|
||||
* across. Shucks.
|
||||
*/
|
||||
tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
|
||||
tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags,
|
||||
key->key, key->keylen, GFP_ATOMIC);
|
||||
sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
|
||||
}
|
||||
|
|
|
@ -464,13 +464,14 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
|
|||
|
||||
int ip6_forward(struct sk_buff *skb)
|
||||
{
|
||||
struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
|
||||
struct dst_entry *dst = skb_dst(skb);
|
||||
struct ipv6hdr *hdr = ipv6_hdr(skb);
|
||||
struct inet6_skb_parm *opt = IP6CB(skb);
|
||||
struct net *net = dev_net(dst->dev);
|
||||
struct inet6_dev *idev;
|
||||
u32 mtu;
|
||||
|
||||
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
|
||||
if (net->ipv6.devconf_all->forwarding == 0)
|
||||
goto error;
|
||||
|
||||
|
|
|
@ -25,12 +25,7 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
|
|||
static inline bool
|
||||
segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
|
||||
{
|
||||
bool r;
|
||||
pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n",
|
||||
invert ? '!' : ' ', min, id, max);
|
||||
r = (id >= min && id <= max) ^ invert;
|
||||
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
|
||||
return r;
|
||||
return (id >= min && id <= max) ^ invert;
|
||||
}
|
||||
|
||||
static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
|
||||
|
@ -65,30 +60,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
|
|||
return false;
|
||||
}
|
||||
|
||||
pr_debug("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
|
||||
pr_debug("TYPE %04X ", rh->type);
|
||||
pr_debug("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
|
||||
|
||||
pr_debug("IPv6 RT segsleft %02X ",
|
||||
segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
|
||||
rh->segments_left,
|
||||
!!(rtinfo->invflags & IP6T_RT_INV_SGS)));
|
||||
pr_debug("type %02X %02X %02X ",
|
||||
rtinfo->rt_type, rh->type,
|
||||
(!(rtinfo->flags & IP6T_RT_TYP) ||
|
||||
((rtinfo->rt_type == rh->type) ^
|
||||
!!(rtinfo->invflags & IP6T_RT_INV_TYP))));
|
||||
pr_debug("len %02X %04X %02X ",
|
||||
rtinfo->hdrlen, hdrlen,
|
||||
!(rtinfo->flags & IP6T_RT_LEN) ||
|
||||
((rtinfo->hdrlen == hdrlen) ^
|
||||
!!(rtinfo->invflags & IP6T_RT_INV_LEN)));
|
||||
pr_debug("res %02X %02X %02X ",
|
||||
rtinfo->flags & IP6T_RT_RES,
|
||||
((const struct rt0_hdr *)rh)->reserved,
|
||||
!((rtinfo->flags & IP6T_RT_RES) &&
|
||||
(((const struct rt0_hdr *)rh)->reserved)));
|
||||
|
||||
ret = (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
|
||||
rh->segments_left,
|
||||
!!(rtinfo->invflags & IP6T_RT_INV_SGS))) &&
|
||||
|
@ -107,22 +78,22 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
|
|||
reserved),
|
||||
sizeof(_reserved),
|
||||
&_reserved);
|
||||
if (!rp) {
|
||||
par->hotdrop = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = (*rp == 0);
|
||||
}
|
||||
|
||||
pr_debug("#%d ", rtinfo->addrnr);
|
||||
if (!(rtinfo->flags & IP6T_RT_FST)) {
|
||||
return ret;
|
||||
} else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
|
||||
pr_debug("Not strict ");
|
||||
if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
|
||||
pr_debug("There isn't enough space\n");
|
||||
return false;
|
||||
} else {
|
||||
unsigned int i = 0;
|
||||
|
||||
pr_debug("#%d ", rtinfo->addrnr);
|
||||
for (temp = 0;
|
||||
temp < (unsigned int)((hdrlen - 8) / 16);
|
||||
temp++) {
|
||||
|
@ -138,26 +109,20 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
|
|||
return false;
|
||||
}
|
||||
|
||||
if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
|
||||
pr_debug("i=%d temp=%d;\n", i, temp);
|
||||
if (ipv6_addr_equal(ap, &rtinfo->addrs[i]))
|
||||
i++;
|
||||
}
|
||||
if (i == rtinfo->addrnr)
|
||||
break;
|
||||
}
|
||||
pr_debug("i=%d #%d\n", i, rtinfo->addrnr);
|
||||
if (i == rtinfo->addrnr)
|
||||
return ret;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
pr_debug("Strict ");
|
||||
if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
|
||||
pr_debug("There isn't enough space\n");
|
||||
return false;
|
||||
} else {
|
||||
pr_debug("#%d ", rtinfo->addrnr);
|
||||
for (temp = 0; temp < rtinfo->addrnr; temp++) {
|
||||
ap = skb_header_pointer(skb,
|
||||
ptr
|
||||
|
@ -173,7 +138,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
|
|||
if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
|
||||
break;
|
||||
}
|
||||
pr_debug("temp=%d #%d\n", temp, rtinfo->addrnr);
|
||||
if (temp == rtinfo->addrnr &&
|
||||
temp == (unsigned int)((hdrlen - 8) / 16))
|
||||
return ret;
|
||||
|
|
|
@ -599,6 +599,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
|
|||
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
|
||||
int l3index = 0;
|
||||
u8 prefixlen;
|
||||
u8 flags;
|
||||
|
||||
if (optlen < sizeof(cmd))
|
||||
return -EINVAL;
|
||||
|
@ -609,6 +610,8 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
|
|||
if (sin6->sin6_family != AF_INET6)
|
||||
return -EINVAL;
|
||||
|
||||
flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
|
||||
|
||||
if (optname == TCP_MD5SIG_EXT &&
|
||||
cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
|
||||
prefixlen = cmd.tcpm_prefixlen;
|
||||
|
@ -619,7 +622,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
|
|||
prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
|
||||
}
|
||||
|
||||
if (optname == TCP_MD5SIG_EXT &&
|
||||
if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
|
||||
cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
|
||||
struct net_device *dev;
|
||||
|
||||
|
@ -640,9 +643,9 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
|
|||
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
|
||||
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
|
||||
AF_INET, prefixlen,
|
||||
l3index);
|
||||
l3index, flags);
|
||||
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
|
||||
AF_INET6, prefixlen, l3index);
|
||||
AF_INET6, prefixlen, l3index, flags);
|
||||
}
|
||||
|
||||
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
|
||||
|
@ -650,12 +653,12 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
|
|||
|
||||
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
|
||||
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
|
||||
AF_INET, prefixlen, l3index,
|
||||
AF_INET, prefixlen, l3index, flags,
|
||||
cmd.tcpm_key, cmd.tcpm_keylen,
|
||||
GFP_KERNEL);
|
||||
|
||||
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
|
||||
AF_INET6, prefixlen, l3index,
|
||||
AF_INET6, prefixlen, l3index, flags,
|
||||
cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
|
||||
}
|
||||
|
||||
|
@ -1404,7 +1407,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
|
|||
* across. Shucks.
|
||||
*/
|
||||
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
|
||||
AF_INET6, 128, l3index, key->key, key->keylen,
|
||||
AF_INET6, 128, l3index, key->flags, key->key, key->keylen,
|
||||
sk_gfp_mask(sk, GFP_ATOMIC));
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -109,7 +109,7 @@ config NF_CONNTRACK_MARK
|
|||
config NF_CONNTRACK_SECMARK
|
||||
bool 'Connection tracking security mark support'
|
||||
depends on NETWORK_SECMARK
|
||||
default m if NETFILTER_ADVANCED=n
|
||||
default y if NETFILTER_ADVANCED=n
|
||||
help
|
||||
This option enables security markings to be applied to
|
||||
connections. Typically they are copied to connections from
|
||||
|
|
|
@ -4090,6 +4090,11 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
|
|||
tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
|
||||
tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
|
||||
tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
|
||||
#ifdef CONFIG_IP_VS_DEBUG
|
||||
/* Global sysctls must be ro in non-init netns */
|
||||
if (!net_eq(net, &init_net))
|
||||
tbl[idx++].mode = 0444;
|
||||
#endif
|
||||
|
||||
ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
|
||||
if (ipvs->sysctl_hdr == NULL) {
|
||||
|
|
|
@ -342,12 +342,6 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
|
|||
return;
|
||||
}
|
||||
|
||||
/* UNREGISTER events are also happening on netns exit.
|
||||
*
|
||||
* Although nf_tables core releases all tables/chains, only this event
|
||||
* handler provides guarantee that hook->ops.dev is still accessible,
|
||||
* so we cannot skip exiting net namespaces.
|
||||
*/
|
||||
__nft_release_basechain(ctx);
|
||||
}
|
||||
|
||||
|
@ -366,6 +360,9 @@ static int nf_tables_netdev_event(struct notifier_block *this,
|
|||
event != NETDEV_CHANGENAME)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
if (!check_net(ctx.net))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
nft_net = nft_pernet(ctx.net);
|
||||
mutex_lock(&nft_net->commit_mutex);
|
||||
list_for_each_entry(table, &nft_net->tables, list) {
|
||||
|
|
|
@ -137,7 +137,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
|
|||
{
|
||||
int ret;
|
||||
|
||||
info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
|
||||
info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
|
||||
if (!info->timer) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
|
|
|
@ -960,6 +960,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
|
|||
tmpl = p->tmpl;
|
||||
|
||||
tcf_lastuse_update(&c->tcf_tm);
|
||||
tcf_action_update_bstats(&c->common, skb);
|
||||
|
||||
if (clear) {
|
||||
qdisc_skb_cb(skb)->post_ct = false;
|
||||
|
@ -1049,7 +1050,6 @@ out_push:
|
|||
|
||||
qdisc_skb_cb(skb)->post_ct = true;
|
||||
out_clear:
|
||||
tcf_action_update_bstats(&c->common, skb);
|
||||
if (defrag)
|
||||
qdisc_skb_cb(skb)->pkt_len = skb->len;
|
||||
return retval;
|
||||
|
|
|
@ -43,3 +43,4 @@ CONFIG_NET_ACT_TUNNEL_KEY=m
|
|||
CONFIG_NET_ACT_MIRRED=m
|
||||
CONFIG_BAREUDP=m
|
||||
CONFIG_IPV6_IOAM6_LWTUNNEL=y
|
||||
CONFIG_CRYPTO_SM4=y
|
||||
|
|
|
@ -289,6 +289,12 @@ set_sysctl()
|
|||
run_cmd sysctl -q -w $*
|
||||
}
|
||||
|
||||
# get sysctl values in NS-A
|
||||
get_sysctl()
|
||||
{
|
||||
${NSA_CMD} sysctl -n $*
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Setup for tests
|
||||
|
||||
|
@ -1003,6 +1009,60 @@ ipv4_tcp_md5()
|
|||
run_cmd nettest -s -I ${NSA_DEV} -M ${MD5_PW} -m ${NS_NET}
|
||||
log_test $? 1 "MD5: VRF: Device must be a VRF - prefix"
|
||||
|
||||
test_ipv4_md5_vrf__vrf_server__no_bind_ifindex
|
||||
test_ipv4_md5_vrf__global_server__bind_ifindex0
|
||||
}
|
||||
|
||||
test_ipv4_md5_vrf__vrf_server__no_bind_ifindex()
|
||||
{
|
||||
log_start
|
||||
show_hint "Simulates applications using VRF without TCP_MD5SIG_FLAG_IFINDEX"
|
||||
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
|
||||
sleep 1
|
||||
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
|
||||
log_test $? 0 "MD5: VRF: VRF-bound server, unbound key accepts connection"
|
||||
|
||||
log_start
|
||||
show_hint "Binding both the socket and the key is not required but it works"
|
||||
run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
|
||||
sleep 1
|
||||
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
|
||||
log_test $? 0 "MD5: VRF: VRF-bound server, bound key accepts connection"
|
||||
}
|
||||
|
||||
test_ipv4_md5_vrf__global_server__bind_ifindex0()
|
||||
{
|
||||
# This particular test needs tcp_l3mdev_accept=1 for Global server to accept VRF connections
|
||||
local old_tcp_l3mdev_accept
|
||||
old_tcp_l3mdev_accept=$(get_sysctl net.ipv4.tcp_l3mdev_accept)
|
||||
set_sysctl net.ipv4.tcp_l3mdev_accept=1
|
||||
|
||||
log_start
|
||||
run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
|
||||
sleep 1
|
||||
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
|
||||
log_test $? 2 "MD5: VRF: Global server, Key bound to ifindex=0 rejects VRF connection"
|
||||
|
||||
log_start
|
||||
run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
|
||||
sleep 1
|
||||
run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
|
||||
log_test $? 0 "MD5: VRF: Global server, key bound to ifindex=0 accepts non-VRF connection"
|
||||
log_start
|
||||
|
||||
run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
|
||||
sleep 1
|
||||
run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
|
||||
log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts VRF connection"
|
||||
|
||||
log_start
|
||||
run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
|
||||
sleep 1
|
||||
run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
|
||||
log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts non-VRF connection"
|
||||
|
||||
# restore value
|
||||
set_sysctl net.ipv4.tcp_l3mdev_accept="$old_tcp_l3mdev_accept"
|
||||
}
|
||||
|
||||
ipv4_tcp_novrf()
|
||||
|
|
|
@ -9,6 +9,7 @@ TEST_PROGS = bridge_igmp.sh \
|
|||
gre_inner_v4_multipath.sh \
|
||||
gre_inner_v6_multipath.sh \
|
||||
gre_multipath.sh \
|
||||
ip6_forward_instats_vrf.sh \
|
||||
ip6gre_inner_v4_multipath.sh \
|
||||
ip6gre_inner_v6_multipath.sh \
|
||||
ipip_flat_gre_key.sh \
|
||||
|
|
|
@ -39,3 +39,5 @@ NETIF_CREATE=yes
|
|||
# Timeout (in seconds) before ping exits regardless of how many packets have
|
||||
# been sent or received
|
||||
PING_TIMEOUT=5
|
||||
# IPv6 traceroute utility name.
|
||||
TROUTE6=traceroute6
|
||||
|
|
172
tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
Executable file
172
tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
Executable file
|
@ -0,0 +1,172 @@
|
|||
#!/bin/bash
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
# Test ipv6 stats on the incoming if when forwarding with VRF
|
||||
|
||||
ALL_TESTS="
|
||||
ipv6_ping
|
||||
ipv6_in_too_big_err
|
||||
ipv6_in_hdr_err
|
||||
ipv6_in_addr_err
|
||||
ipv6_in_discard
|
||||
"
|
||||
|
||||
NUM_NETIFS=4
|
||||
source lib.sh
|
||||
|
||||
h1_create()
|
||||
{
|
||||
simple_if_init $h1 2001:1:1::2/64
|
||||
ip -6 route add vrf v$h1 2001:1:2::/64 via 2001:1:1::1
|
||||
}
|
||||
|
||||
h1_destroy()
|
||||
{
|
||||
ip -6 route del vrf v$h1 2001:1:2::/64 via 2001:1:1::1
|
||||
simple_if_fini $h1 2001:1:1::2/64
|
||||
}
|
||||
|
||||
router_create()
|
||||
{
|
||||
vrf_create router
|
||||
__simple_if_init $rtr1 router 2001:1:1::1/64
|
||||
__simple_if_init $rtr2 router 2001:1:2::1/64
|
||||
mtu_set $rtr2 1280
|
||||
}
|
||||
|
||||
router_destroy()
|
||||
{
|
||||
mtu_restore $rtr2
|
||||
__simple_if_fini $rtr2 2001:1:2::1/64
|
||||
__simple_if_fini $rtr1 2001:1:1::1/64
|
||||
vrf_destroy router
|
||||
}
|
||||
|
||||
h2_create()
|
||||
{
|
||||
simple_if_init $h2 2001:1:2::2/64
|
||||
ip -6 route add vrf v$h2 2001:1:1::/64 via 2001:1:2::1
|
||||
mtu_set $h2 1280
|
||||
}
|
||||
|
||||
h2_destroy()
|
||||
{
|
||||
mtu_restore $h2
|
||||
ip -6 route del vrf v$h2 2001:1:1::/64 via 2001:1:2::1
|
||||
simple_if_fini $h2 2001:1:2::2/64
|
||||
}
|
||||
|
||||
setup_prepare()
|
||||
{
|
||||
h1=${NETIFS[p1]}
|
||||
rtr1=${NETIFS[p2]}
|
||||
|
||||
rtr2=${NETIFS[p3]}
|
||||
h2=${NETIFS[p4]}
|
||||
|
||||
vrf_prepare
|
||||
h1_create
|
||||
router_create
|
||||
h2_create
|
||||
|
||||
forwarding_enable
|
||||
}
|
||||
|
||||
cleanup()
|
||||
{
|
||||
pre_cleanup
|
||||
|
||||
forwarding_restore
|
||||
|
||||
h2_destroy
|
||||
router_destroy
|
||||
h1_destroy
|
||||
vrf_cleanup
|
||||
}
|
||||
|
||||
ipv6_in_too_big_err()
|
||||
{
|
||||
RET=0
|
||||
|
||||
local t0=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
|
||||
local vrf_name=$(master_name_get $h1)
|
||||
|
||||
# Send too big packets
|
||||
ip vrf exec $vrf_name \
|
||||
$PING6 -s 1300 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
|
||||
|
||||
local t1=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
|
||||
test "$((t1 - t0))" -ne 0
|
||||
check_err $?
|
||||
log_test "Ip6InTooBigErrors"
|
||||
}
|
||||
|
||||
ipv6_in_hdr_err()
|
||||
{
|
||||
RET=0
|
||||
|
||||
local t0=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
|
||||
local vrf_name=$(master_name_get $h1)
|
||||
|
||||
# Send packets with hop limit 1, easiest with traceroute6 as some ping6
|
||||
# doesn't allow hop limit to be specified
|
||||
ip vrf exec $vrf_name \
|
||||
$TROUTE6 2001:1:2::2 &> /dev/null
|
||||
|
||||
local t1=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
|
||||
test "$((t1 - t0))" -ne 0
|
||||
check_err $?
|
||||
log_test "Ip6InHdrErrors"
|
||||
}
|
||||
|
||||
ipv6_in_addr_err()
|
||||
{
|
||||
RET=0
|
||||
|
||||
local t0=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
|
||||
local vrf_name=$(master_name_get $h1)
|
||||
|
||||
# Disable forwarding temporary while sending the packet
|
||||
sysctl -qw net.ipv6.conf.all.forwarding=0
|
||||
ip vrf exec $vrf_name \
|
||||
$PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
|
||||
sysctl -qw net.ipv6.conf.all.forwarding=1
|
||||
|
||||
local t1=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
|
||||
test "$((t1 - t0))" -ne 0
|
||||
check_err $?
|
||||
log_test "Ip6InAddrErrors"
|
||||
}
|
||||
|
||||
ipv6_in_discard()
|
||||
{
|
||||
RET=0
|
||||
|
||||
local t0=$(ipv6_stats_get $rtr1 Ip6InDiscards)
|
||||
local vrf_name=$(master_name_get $h1)
|
||||
|
||||
# Add a policy to discard
|
||||
ip xfrm policy add dst 2001:1:2::2/128 dir fwd action block
|
||||
ip vrf exec $vrf_name \
|
||||
$PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
|
||||
ip xfrm policy del dst 2001:1:2::2/128 dir fwd
|
||||
|
||||
local t1=$(ipv6_stats_get $rtr1 Ip6InDiscards)
|
||||
test "$((t1 - t0))" -ne 0
|
||||
check_err $?
|
||||
log_test "Ip6InDiscards"
|
||||
}
|
||||
ipv6_ping()
|
||||
{
|
||||
RET=0
|
||||
|
||||
ping6_test $h1 2001:1:2::2
|
||||
}
|
||||
|
||||
trap cleanup EXIT
|
||||
|
||||
setup_prepare
|
||||
setup_wait
|
||||
tests_run
|
||||
|
||||
exit $EXIT_STATUS
|
|
@ -751,6 +751,14 @@ qdisc_parent_stats_get()
|
|||
| jq '.[] | select(.parent == "'"$parent"'") | '"$selector"
|
||||
}
|
||||
|
||||
ipv6_stats_get()
|
||||
{
|
||||
local dev=$1; shift
|
||||
local stat=$1; shift
|
||||
|
||||
cat /proc/net/dev_snmp6/$dev | grep "^$stat" | cut -f2
|
||||
}
|
||||
|
||||
humanize()
|
||||
{
|
||||
local speed=$1; shift
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include <unistd.h>
|
||||
#include <time.h>
|
||||
#include <errno.h>
|
||||
#include <getopt.h>
|
||||
|
||||
#include <linux/xfrm.h>
|
||||
#include <linux/ipsec.h>
|
||||
|
@ -101,6 +102,8 @@ struct sock_args {
|
|||
struct sockaddr_in6 v6;
|
||||
} md5_prefix;
|
||||
unsigned int prefix_len;
|
||||
/* 0: default, -1: force off, +1: force on */
|
||||
int bind_key_ifindex;
|
||||
|
||||
/* expected addresses and device index for connection */
|
||||
const char *expected_dev;
|
||||
|
@ -271,11 +274,14 @@ static int tcp_md5sig(int sd, void *addr, socklen_t alen, struct sock_args *args
|
|||
}
|
||||
memcpy(&md5sig.tcpm_addr, addr, alen);
|
||||
|
||||
if (args->ifindex) {
|
||||
if ((args->ifindex && args->bind_key_ifindex >= 0) || args->bind_key_ifindex >= 1) {
|
||||
opt = TCP_MD5SIG_EXT;
|
||||
md5sig.tcpm_flags |= TCP_MD5SIG_FLAG_IFINDEX;
|
||||
|
||||
md5sig.tcpm_ifindex = args->ifindex;
|
||||
log_msg("TCP_MD5SIG_FLAG_IFINDEX set tcpm_ifindex=%d\n", md5sig.tcpm_ifindex);
|
||||
} else {
|
||||
log_msg("TCP_MD5SIG_FLAG_IFINDEX off\n", md5sig.tcpm_ifindex);
|
||||
}
|
||||
|
||||
rc = setsockopt(sd, IPPROTO_TCP, opt, &md5sig, sizeof(md5sig));
|
||||
|
@ -1822,6 +1828,14 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
|
|||
}
|
||||
|
||||
/* Short-option string for getopt_long(); a trailing ':' marks options
 * that take an argument.
 */
#define GETOPT_STR "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6xL:0:1:2:3:Fbq"

/* Long-option return values; chosen above the printable-character range
 * so they can never collide with the short options above.
 */
#define OPT_FORCE_BIND_KEY_IFINDEX 1001
#define OPT_NO_BIND_KEY_IFINDEX 1002

/* Long options understood by getopt_long(); neither takes an argument. */
static struct option long_opts[] = {
	{"force-bind-key-ifindex", 0, 0, OPT_FORCE_BIND_KEY_IFINDEX},
	{"no-bind-key-ifindex", 0, 0, OPT_NO_BIND_KEY_IFINDEX},
	{0, 0, 0, 0}
};
|
||||
|
||||
static void print_usage(char *prog)
|
||||
{
|
||||
|
@ -1858,6 +1872,10 @@ static void print_usage(char *prog)
|
|||
" -M password use MD5 sum protection\n"
|
||||
" -X password MD5 password for client mode\n"
|
||||
" -m prefix/len prefix and length to use for MD5 key\n"
|
||||
" --no-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX off\n"
|
||||
" --force-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX on\n"
|
||||
" (default: only if -I is passed)\n"
|
||||
"\n"
|
||||
" -g grp multicast group (e.g., 239.1.1.1)\n"
|
||||
" -i interactive mode (default is echo and terminate)\n"
|
||||
"\n"
|
||||
|
@ -1893,7 +1911,7 @@ int main(int argc, char *argv[])
|
|||
* process input args
|
||||
*/
|
||||
|
||||
while ((rc = getopt(argc, argv, GETOPT_STR)) != -1) {
|
||||
while ((rc = getopt_long(argc, argv, GETOPT_STR, long_opts, NULL)) != -1) {
|
||||
switch (rc) {
|
||||
case 'B':
|
||||
both_mode = 1;
|
||||
|
@ -1966,6 +1984,12 @@ int main(int argc, char *argv[])
|
|||
case 'M':
|
||||
args.password = optarg;
|
||||
break;
|
||||
case OPT_FORCE_BIND_KEY_IFINDEX:
|
||||
args.bind_key_ifindex = 1;
|
||||
break;
|
||||
case OPT_NO_BIND_KEY_IFINDEX:
|
||||
args.bind_key_ifindex = -1;
|
||||
break;
|
||||
case 'X':
|
||||
args.client_pw = optarg;
|
||||
break;
|
||||
|
|
|
@ -199,7 +199,6 @@ fi
|
|||
# test basic connectivity
# (the stray interactive "bash" that used to sit in this error path was
# a debugging leftover; it would hang unattended/CI runs, so it is gone)
if ! ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
	echo "ERROR: ns1 cannot reach ns2" 1>&2
	exit 1
fi
|
||||
|
||||
|
|
|
@ -741,6 +741,149 @@ EOF
|
|||
return $lret
|
||||
}
|
||||
|
||||
# test port shadowing.
# create two listening services, one on router (ns0), one
# on client (ns2), which is masqueraded from ns1 point of view.
# ns2 sends udp packet coming from service port to ns1, on a highport.
# Later, if n1 uses same highport to connect to ns0:service, packet
# might be port-forwarded to ns2 instead.

# second argument tells if we expect the 'fake-entry' to take effect
# (CLIENT) or not (ROUTER).
test_port_shadow()
{
	local test=$1
	local expect=$2
	local daddrc="10.0.1.99"	# address the fake entry is sent to (ns1)
	local daddrs="10.0.1.1"		# router service address as seen from ns1
	local result=""
	local logmsg=""

	# Router-side UDP service on port 1405; replies "ROUTER".
	echo ROUTER | ip netns exec "$ns0" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
	nc_r=$!

	# Client-side UDP service on the same port; replies "CLIENT".
	echo CLIENT | ip netns exec "$ns2" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
	nc_c=$!

	# make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405.
	echo "fake-entry" | ip netns exec "$ns2" nc -w 1 -p 1405 -u "$daddrc" 41404 > /dev/null

	# ns1 tries to connect to ns0:1405. With default settings this should connect
	# to client, it matches the conntrack entry created above.
	result=$(echo "" | ip netns exec "$ns1" nc -w 1 -p 41404 -u "$daddrs" 1405)

	if [ "$result" = "$expect" ] ;then
		echo "PASS: portshadow test $test: got reply from ${expect}${logmsg}"
	else
		echo "ERROR: portshadow test $test: got reply from \"$result\", not $expect as intended"
		ret=1
	fi

	# Reap the listeners; they may already have exited via their -w 5 timeout.
	kill $nc_r $nc_c 2>/dev/null

	# flush udp entries for next test round, if any
	ip netns exec "$ns0" conntrack -F >/dev/null 2>&1
}
|
||||
|
||||
# This prevents port shadow of router service via packet filter,
# packets claiming to originate from service port from internal
# network are dropped.
test_port_shadow_filter()
{
	local family=$1

	# Drop forwarded packets entering via the internal interface
	# (veth1) that claim to come from the service port 1405.
	ip netns exec "$ns0" nft -f /dev/stdin <<EOF
table $family filter {
	chain forward {
		type filter hook forward priority 0; policy accept;
		meta iif veth1 udp sport 1405 drop
	}
}
EOF
	# With the filter in place the reply must come from the router.
	test_port_shadow "port-filter" "ROUTER"

	ip netns exec "$ns0" nft delete table $family filter
}
|
||||
|
||||
# This prevents port shadow of router service via notrack.
test_port_shadow_notrack()
{
	local family=$1

	# Exempt the router's service port from connection tracking in
	# both directions, so no conntrack/NAT entry can redirect it.
	ip netns exec "$ns0" nft -f /dev/stdin <<EOF
table $family raw {
	chain prerouting {
		type filter hook prerouting priority -300; policy accept;
		meta iif veth0 udp dport 1405 notrack
		udp dport 1405 notrack
	}
	chain output {
		type filter hook output priority -300; policy accept;
		udp sport 1405 notrack
	}
}
EOF
	# Untracked traffic must be answered by the router itself.
	test_port_shadow "port-notrack" "ROUTER"

	ip netns exec "$ns0" nft delete table $family raw
}
|
||||
|
||||
# This prevents port shadow of router service via sport remap.
test_port_shadow_pat()
{
	local family=$1

	# Masquerade forwarded packets arriving via veth1 whose source
	# port is <= 1405 to a random high port, so internal hosts can
	# never impersonate the router's service port.
	ip netns exec "$ns0" nft -f /dev/stdin <<EOF
table $family pat {
	chain postrouting {
		type nat hook postrouting priority -1; policy accept;
		meta iif veth1 udp sport <= 1405 masquerade to : 1406-65535 random
	}
}
EOF
	# With source-port remapping active the router must answer.
	test_port_shadow "pat" "ROUTER"

	ip netns exec "$ns0" nft delete table $family pat
}
|
||||
|
||||
# Run the port-shadowing scenarios: first demonstrate the problem
# (reply comes from CLIENT), then verify each mitigation makes the
# reply come from the ROUTER again.
test_port_shadowing()
{
	local family="ip"

	# Enable forwarding on both router interfaces.
	ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
	ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null

	# Masquerade everything leaving towards the external side (veth0).
	ip netns exec "$ns0" nft -f /dev/stdin <<EOF
table $family nat {
	chain postrouting {
		type nat hook postrouting priority 0; policy accept;
		meta oif veth0 masquerade
	}
}
EOF
	if [ $? -ne 0 ]; then
		# fixed duplicated word in the message ("add add")
		echo "SKIP: Could not add $family masquerade hook"
		return $ksft_skip
	fi

	# test default behaviour. Packet from ns1 to ns0 is redirected to ns2.
	test_port_shadow "default" "CLIENT"

	# test packet filter based mitigation: prevent forwarding of
	# packets claiming to come from the service port.
	test_port_shadow_filter "$family"

	# test conntrack based mitigation: connections going or coming
	# from router:service bypass connection tracking.
	test_port_shadow_notrack "$family"

	# test nat based mitigation: forwarded packets coming from service port
	# are masqueraded with random highport.
	test_port_shadow_pat "$family"

	ip netns exec "$ns0" nft delete table $family nat
}
|
||||
|
||||
# ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99
|
||||
for i in 0 1 2; do
|
||||
|
@ -861,6 +1004,8 @@ reset_counters
|
|||
$test_inet_nat && test_redirect inet
|
||||
$test_inet_nat && test_redirect6 inet
|
||||
|
||||
test_port_shadowing
|
||||
|
||||
if [ $ret -ne 0 ];then
|
||||
echo -n "FAIL: "
|
||||
nft --version
|
||||
|
|
|
@ -332,8 +332,6 @@ static void test_no_sockets(const struct test_opts *opts)
|
|||
read_vsock_stat(&sockets);
|
||||
|
||||
check_no_sockets(&sockets);
|
||||
|
||||
free_sock_stat(&sockets);
|
||||
}
|
||||
|
||||
static void test_listen_socket_server(const struct test_opts *opts)
|
||||
|
|
Loading…
Add table
Reference in a new issue