mirror of https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-26 16:41:25 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:

 1) More jumbo frame fixes in r8169, from Heiner Kallweit.

 2) Fix bpf build in minimal configuration, from Alexei Starovoitov.

 3) Use after free in slcan driver, from Jouni Hogander.

 4) Flower classifier port ranges don't work properly in the HW offload
    case, from Yoshiki Komachi.

 5) Use after free in hns3_nic_maybe_stop_tx(), from Yunsheng Lin.

 6) Out of bounds access in mqprio_dump(), from Vladyslav Tarasiuk.

 7) Fix flow dissection in dsa TX path, from Alexander Lobakin.

 8) Stale syncookie timestamp fixes from Guillaume Nault.

[ Did an evil merge to silence a warning introduced by this pull - Linus ]

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (84 commits)
  r8169: fix rtl_hw_jumbo_disable for RTL8168evl
  net_sched: validate TCA_KIND attribute in tc_chain_tmplt_add()
  r8169: add missing RX enabling for WoL on RTL8125
  vhost/vsock: accept only packets with the right dst_cid
  net: phy: dp83867: fix hfs boot in rgmii mode
  net: ethernet: ti: cpsw: fix extra rx interrupt
  inet: protect against too small mtu values.
  gre: refetch erspan header from skb->data after pskb_may_pull()
  pppoe: remove redundant BUG_ON() check in pppoe_pernet
  tcp: Protect accesses to .ts_recent_stamp with {READ,WRITE}_ONCE()
  tcp: tighten acceptance of ACKs not matching a child socket
  tcp: fix rejected syncookies due to stale timestamps
  lpc_eth: kernel BUG on remove
  tcp: md5: fix potential overestimation of TCP option space
  net: sched: allow indirect blocks to bind to clsact in TC
  net: core: rename indirect block ingress cb function
  net-sysfs: Call dev_hold always in netdev_queue_add_kobject
  net: dsa: fix flow dissection on Tx path
  net/tls: Fix return values to avoid ENOTSUPP
  net: avoid an indirect call in ____sys_recvmsg()
  ...
This commit is contained in:
commit 95e6ba5133

119 changed files with 1026 additions and 629 deletions

MAINTAINERS | 17 +++++++++++++++++

--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10107,6 +10107,15 @@ W:	https://linuxtv.org
 S:	Maintained
 F:	drivers/media/radio/radio-maxiradio*
 
+MCAN MMIO DEVICE DRIVER
+M:	Sriram Dash <sriram.dash@samsung.com>
+L:	linux-can@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/net/can/m_can.txt
+F:	drivers/net/can/m_can/m_can.c
+F:	drivers/net/can/m_can/m_can.h
+F:	drivers/net/can/m_can/m_can_platform.c
+
 MCP4018 AND MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVERS
 M:	Peter Rosin <peda@axentia.se>
 L:	linux-iio@vger.kernel.org
@@ -18139,6 +18148,14 @@ M:	Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
 S:	Maintained
 F:	drivers/net/ethernet/xilinx/xilinx_axienet*
 
+XILINX CAN DRIVER
+M:	Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R:	Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+L:	linux-can@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/net/can/xilinx_can.txt
+F:	drivers/net/can/xilinx_can.c
+
 XILINX UARTLITE SERIAL DRIVER
 M:	Peter Korsgaard <jacmet@sunsite.dk>
 L:	linux-serial@vger.kernel.org
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -421,16 +421,15 @@ static int addr6_resolve(struct sockaddr *src_sock,
 		(const struct sockaddr_in6 *)dst_sock;
 	struct flowi6 fl6;
 	struct dst_entry *dst;
-	int ret;
 
 	memset(&fl6, 0, sizeof fl6);
 	fl6.daddr = dst_in->sin6_addr;
 	fl6.saddr = src_in->sin6_addr;
 	fl6.flowi6_oif = addr->bound_dev_if;
 
-	ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
-	if (ret < 0)
-		return ret;
+	dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
+	if (IS_ERR(dst))
+		return PTR_ERR(dst);
 
 	if (ipv6_addr_any(&src_in->sin6_addr))
 		src_in->sin6_addr = fl6.saddr;
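Annotation (not part of the commit): the old ipv6_dst_lookup() stub returned an int and filled a dst_entry out-parameter, while ipv6_dst_lookup_flow() returns the dst_entry itself or an ERR_PTR()-encoded error, so callers switch from an integer check to IS_ERR()/PTR_ERR(). A minimal sketch of the new calling convention; everything around the stub call is illustrative:

	struct dst_entry *dst;

	/* returns a valid dst or an ERR_PTR-encoded error, not NULL */
	dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, &fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);
	/* ... use the route ... */
	dst_release(dst);	/* drop the reference taken by the lookup */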
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -117,10 +117,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
 	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
 	fl6.flowi6_proto = IPPROTO_UDP;
 
-	if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
-						recv_sockets.sk6->sk, &ndst, &fl6))) {
+	ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
+					       recv_sockets.sk6->sk, &fl6,
+					       NULL);
+	if (unlikely(IS_ERR(ndst))) {
 		pr_err_ratelimited("no route to %pI6\n", daddr);
-		goto put;
+		return NULL;
 	}
 
 	if (unlikely(ndst->error)) {
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -617,6 +617,7 @@ err_free_chan:
 	sl->tty = NULL;
 	tty->disc_data = NULL;
 	clear_bit(SLF_INUSE, &sl->flags);
+	slc_free_netdev(sl->dev);
 	free_netdev(sl->dev);
 
 err_exit:
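Annotation (not part of the commit): the use-after-free arose because free_netdev() ran while driver bookkeeping still referenced the device, so a later open could reach freed memory. The general shape of the fix, with a hypothetical helper name standing in for what slc_free_netdev() does here:

	/* tear down every external reference to the netdev first ... */
	drop_driver_references(dev);	/* hypothetical: clear lookup tables */
	/* ... and only then release dev and netdev_priv(dev) */
	free_netdev(dev);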
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -792,7 +792,7 @@ resubmit:
 					  up);
 
 	usb_anchor_urb(urb, &up->rx_urbs);
-	ret = usb_submit_urb(urb, GFP_KERNEL);
+	ret = usb_submit_urb(urb, GFP_ATOMIC);
 
 	if (ret < 0) {
 		netdev_err(up->netdev,
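Annotation (not part of the commit): the URB is resubmitted from its completion handler, which runs in atomic context where GFP_KERNEL could sleep; GFP_ATOMIC is the safe allocation mode there. A minimal sketch of the pattern, with an illustrative handler body:

	static void rx_urb_complete(struct urb *urb)
	{
		int ret;

		/* ... process urb->transfer_buffer ... */

		/* atomic context: must not sleep, so no GFP_KERNEL here */
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		if (ret < 0)
			pr_err("resubmit failed: %d\n", ret);
	}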
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -542,16 +542,17 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
 
 /**
  * xcan_write_frame - Write a frame to HW
- * @priv: Driver private data structure
+ * @ndev: Pointer to net_device structure
  * @skb: sk_buff pointer that contains data to be Txed
  * @frame_offset: Register offset to write the frame to
  */
-static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
+static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
 			     int frame_offset)
 {
 	u32 id, dlc, data[2] = {0, 0};
 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
 	u32 ramoff, dwindex = 0, i;
+	struct xcan_priv *priv = netdev_priv(ndev);
 
 	/* Watch carefully on the bit sequence */
 	if (cf->can_id & CAN_EFF_FLAG) {
@@ -587,6 +588,14 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
 		dlc |= XCAN_DLCR_EDL_MASK;
 	}
 
+	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+	else
+		can_put_echo_skb(skb, ndev, 0);
+
+	priv->tx_head++;
+
 	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
 	/* If the CAN frame is RTR frame this write triggers transmission
 	 * (not on CAN FD)
@@ -638,13 +647,9 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
 			     XCAN_SR_TXFLL_MASK))
 		return -ENOSPC;
 
-	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
-
 	spin_lock_irqsave(&priv->tx_lock, flags);
 
-	priv->tx_head++;
-
-	xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);
+	xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);
 
 	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
 	if (priv->tx_max > 1)
@@ -675,13 +680,9 @@ static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
 		     BIT(XCAN_TX_MAILBOX_IDX)))
 		return -ENOSPC;
 
-	can_put_echo_skb(skb, ndev, 0);
-
 	spin_lock_irqsave(&priv->tx_lock, flags);
 
-	priv->tx_head++;
-
-	xcan_write_frame(priv, skb,
+	xcan_write_frame(ndev, skb,
 			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
 
 	/* Mark buffer as ready for transmit */
@@ -1772,7 +1773,8 @@ static int xcan_probe(struct platform_device *pdev)
 
 	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
 	if (IS_ERR(priv->bus_clk)) {
-		dev_err(&pdev->dev, "bus clock not found\n");
+		if (PTR_ERR(priv->bus_clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "bus clock not found\n");
 		ret = PTR_ERR(priv->bus_clk);
 		goto err_free;
 	}
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1115,7 +1115,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 				phy_interface_mode(lmac->lmac_type)))
 		return -ENODEV;
 
-	phy_start_aneg(lmac->phydev);
+	phy_start(lmac->phydev);
 	return 0;
 }
 
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1332,6 +1332,7 @@ static int enetc_phy_connect(struct net_device *ndev)
 {
 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
 	struct phy_device *phydev;
+	struct ethtool_eee edata;
 
 	if (!priv->phy_node)
 		return 0; /* phy-less mode */
@@ -1345,6 +1346,10 @@ static int enetc_phy_connect(struct net_device *ndev)
 
 	phy_attached_info(phydev);
 
+	/* disable EEE autoneg, until ENETC driver supports it */
+	memset(&edata, 0, sizeof(struct ethtool_eee));
+	phy_ethtool_set_eee(phydev, &edata);
+
 	return 0;
 }
 
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1287,30 +1287,25 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
 }
 
 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
-				  struct sk_buff **out_skb)
+				  struct net_device *netdev,
+				  struct sk_buff *skb)
 {
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
-	struct sk_buff *skb = *out_skb;
 	unsigned int bd_num;
 
 	bd_num = hns3_tx_bd_num(skb, bd_size);
 	if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
-		struct sk_buff *new_skb;
-
 		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
 		    !hns3_skb_need_linearized(skb, bd_size, bd_num))
 			goto out;
 
-		/* manual split the send packet */
-		new_skb = skb_copy(skb, GFP_ATOMIC);
-		if (!new_skb)
+		if (__skb_linearize(skb))
 			return -ENOMEM;
-		dev_kfree_skb_any(skb);
-		*out_skb = new_skb;
 
-		bd_num = hns3_tx_bd_count(new_skb->len);
-		if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
-		    (!skb_is_gso(new_skb) &&
+		bd_num = hns3_tx_bd_count(skb->len);
+		if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
+		    (!skb_is_gso(skb) &&
 		     bd_num > HNS3_MAX_NON_TSO_BD_NUM))
 			return -ENOMEM;
 
@@ -1320,10 +1315,23 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 	}
 
 out:
-	if (unlikely(ring_space(ring) < bd_num))
-		return -EBUSY;
+	if (likely(ring_space(ring) >= bd_num))
+		return bd_num;
 
-	return bd_num;
+	netif_stop_subqueue(netdev, ring->queue_index);
+	smp_mb(); /* Memory barrier before checking ring_space */
+
+	/* Start queue in case hns3_clean_tx_ring has just made room
+	 * available and has not seen the queue stopped state performed
+	 * by netif_stop_subqueue above.
+	 */
+	if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
+	    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+		netif_start_subqueue(netdev, ring->queue_index);
+		return bd_num;
+	}
+
+	return -EBUSY;
 }
 
 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
@@ -1400,13 +1408,13 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* Prefetch the data used later */
 	prefetch(skb->data);
 
-	ret = hns3_nic_maybe_stop_tx(ring, &skb);
+	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
 	if (unlikely(ret <= 0)) {
 		if (ret == -EBUSY) {
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.tx_busy++;
 			u64_stats_update_end(&ring->syncp);
-			goto out_net_tx_busy;
+			return NETDEV_TX_BUSY;
 		} else if (ret == -ENOMEM) {
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.sw_err_cnt++;
@@ -1457,12 +1465,6 @@ fill_err:
 out_err_tx_ok:
 	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
-
-out_net_tx_busy:
-	netif_stop_subqueue(netdev, ring->queue_index);
-	smp_mb(); /* Commit all data before submit */
-
-	return NETDEV_TX_BUSY;
 }
 
 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
@@ -2519,7 +2521,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
 	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
 	netdev_tx_completed_queue(dev_queue, pkts, bytes);
 
-	if (unlikely(pkts && netif_carrier_ok(netdev) &&
+	if (unlikely(netif_carrier_ok(netdev) &&
 		     ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
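Annotation (not part of the commit): the rewritten hns3_nic_maybe_stop_tx() is an instance of the standard lock-free stop/wake handshake between a driver's xmit and TX-completion paths. Stripped to its essentials, the pattern looks like this sketch:

	/* xmit path: no room left for this packet */
	netif_stop_subqueue(netdev, qid);
	smp_mb();	/* pairs with the barrier on the completion side */

	/* re-check: completion may have freed space before seeing the stop */
	if (ring_space(ring) >= needed) {
		netif_start_subqueue(netdev, qid);
		/* proceed with transmission */
	} else {
		return NETDEV_TX_BUSY;	/* completion will restart the queue */
	}

The barrier guarantees that either the xmit path sees the space freed by a racing hns3_clean_tx_ring(), or the completion path sees the stopped queue and wakes it; without it the queue could stall indefinitely.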
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -8438,13 +8438,16 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
 	if (hdev->pdev->revision == 0x20)
 		return -EOPNOTSUPP;
 
+	vport = hclge_get_vf_vport(hdev, vfid);
+	if (!vport)
+		return -EINVAL;
+
 	/* qos is a 3 bits value, so can not be bigger than 7 */
-	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
+	if (vlan > VLAN_N_VID - 1 || qos > 7)
 		return -EINVAL;
 	if (proto != htons(ETH_P_8021Q))
 		return -EPROTONOSUPPORT;
 
-	vport = &hdev->vport[vfid];
 	state = hclge_get_port_base_vlan_state(vport,
 					       vport->port_base_vlan_cfg.state,
 					       vlan);
@@ -8455,21 +8458,12 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
 	vlan_info.qos = qos;
 	vlan_info.vlan_proto = ntohs(proto);
 
-	/* update port based VLAN for PF */
-	if (!vfid) {
-		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
-		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
-		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
-
-		return ret;
-	}
-
 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
 		return hclge_update_port_base_vlan_cfg(vport, state,
 						       &vlan_info);
 	} else {
 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
-							(u8)vfid, state,
+							vport->vport_id, state,
 							vlan, qos,
 							ntohs(proto));
 		return ret;
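Annotation (not part of the commit): the change replaces an unchecked `&hdev->vport[vfid]` array access with a lookup helper that validates the index and returns NULL when it is out of range, so the bounds check can no longer be bypassed or drift out of sync with the array size. Sketch of the validate-then-translate idiom:

	vport = hclge_get_vf_vport(hdev, vfid);	/* NULL if vfid is invalid */
	if (!vport)
		return -EINVAL;
	/* from here on, use vport (and vport->vport_id), never raw vfid */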
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -816,7 +816,7 @@ struct mlx5e_xsk {
 struct mlx5e_priv {
 	/* priv data path fields - start */
 	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
-	int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+	int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 	struct mlx5e_dcbx_dp dcbx_dp;
 #endif
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -73,6 +73,7 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
 	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2]	= 50000,
 	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR]	= 50000,
 	[MLX5E_CAUI_4_100GBASE_CR4_KR4]		= 100000,
+	[MLX5E_100GAUI_2_100GBASE_CR2_KR2]	= 100000,
 	[MLX5E_200GAUI_4_200GBASE_CR4_KR4]	= 200000,
 	[MLX5E_400GAUI_8]			= 400000,
 };
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -155,8 +155,11 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 		}
 
 		if (port_buffer->buffer[i].size <
-		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
+			pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
+			       i, port_buffer->buffer[i].size);
 			return -ENOMEM;
+		}
 
 		port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
 		port_buffer->buffer[i].xon =
@@ -232,6 +235,26 @@ static int update_buffer_lossy(unsigned int max_mtu,
 	return 0;
 }
 
+static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en)
+{
+	u32 g_rx_pause, g_tx_pause;
+	int err;
+
+	err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause);
+	if (err)
+		return err;
+
+	/* If global pause enabled, set all active buffers to lossless.
+	 * Otherwise, check PFC setting.
+	 */
+	if (g_rx_pause || g_tx_pause)
+		*pfc_en = 0xff;
+	else
+		err = mlx5_query_port_pfc(mdev, pfc_en, NULL);
+
+	return err;
+}
+
 #define MINIMUM_MAX_MTU 9216
 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 				    u32 change, unsigned int mtu,
@@ -277,7 +300,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
 	if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
 		update_prio2buffer = true;
-		err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL);
+		err = fill_pfc_en(priv->mdev, &curr_pfc_en);
 		if (err)
 			return err;
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -297,10 +297,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 
 	int ret;
 
-	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
-					 fl6);
-	if (ret < 0)
-		return ret;
+	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6,
+					      NULL);
+	if (IS_ERR(dst))
+		return PTR_ERR(dst);
 
 	if (!(*out_ttl))
 		*out_ttl = ip6_dst_hoplimit(dst);
@@ -329,7 +329,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 	struct net_device *out_dev, *route_dev;
 	struct flowi6 fl6 = {};
 	struct ipv6hdr *ip6h;
-	struct neighbour *n;
+	struct neighbour *n = NULL;
 	int ipv6_encap_size;
 	char *encap_header;
 	u8 nud_state, ttl;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1027,18 +1027,11 @@ static bool ext_link_mode_requested(const unsigned long *adver)
 	return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
-static bool ext_speed_requested(u32 speed)
-{
-#define MLX5E_MAX_PTYS_LEGACY_SPEED 100000
-	return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED);
-}
-
-static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed)
+static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported)
 {
 	bool ext_link_mode = ext_link_mode_requested(adver);
-	bool ext_speed = ext_speed_requested(speed);
 
-	return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed;
+	return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
 }
 
 int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
@@ -1065,8 +1058,8 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 	autoneg = link_ksettings->base.autoneg;
 	speed = link_ksettings->base.speed;
 
-	ext = ext_requested(autoneg, adver, speed),
 	ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	ext = ext_requested(autoneg, adver, ext_supported);
 	if (!ext_supported && ext)
 		return -EOPNOTSUPP;
 
@@ -1643,7 +1636,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
 		break;
 	case MLX5_MODULE_ID_SFP:
 		modinfo->type       = ETH_MODULE_SFF_8472;
-		modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
 		break;
 	default:
 		netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
|
@ -1691,11 +1691,10 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
|
||||||
struct mlx5e_params *params,
|
struct mlx5e_params *params,
|
||||||
struct mlx5e_channel_param *cparam)
|
struct mlx5e_channel_param *cparam)
|
||||||
{
|
{
|
||||||
struct mlx5e_priv *priv = c->priv;
|
|
||||||
int err, tc;
|
int err, tc;
|
||||||
|
|
||||||
for (tc = 0; tc < params->num_tc; tc++) {
|
for (tc = 0; tc < params->num_tc; tc++) {
|
||||||
int txq_ix = c->ix + tc * priv->max_nch;
|
int txq_ix = c->ix + tc * params->num_channels;
|
||||||
|
|
||||||
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
|
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
|
||||||
params, &cparam->sq, &c->sq[tc], tc);
|
params, &cparam->sq, &c->sq[tc], tc);
|
||||||
|
@ -2876,26 +2875,21 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
|
||||||
netdev_set_tc_queue(netdev, tc, nch, 0);
|
netdev_set_tc_queue(netdev, tc, nch, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
|
static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
|
||||||
{
|
{
|
||||||
int i, tc;
|
int i, ch;
|
||||||
|
|
||||||
for (i = 0; i < priv->max_nch; i++)
|
ch = priv->channels.num;
|
||||||
for (tc = 0; tc < priv->profile->max_tc; tc++)
|
|
||||||
priv->channel_tc2txq[i][tc] = i + tc * priv->max_nch;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
|
for (i = 0; i < ch; i++) {
|
||||||
{
|
int tc;
|
||||||
struct mlx5e_channel *c;
|
|
||||||
struct mlx5e_txqsq *sq;
|
for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
|
||||||
int i, tc;
|
struct mlx5e_channel *c = priv->channels.c[i];
|
||||||
|
struct mlx5e_txqsq *sq = &c->sq[tc];
|
||||||
|
|
||||||
for (i = 0; i < priv->channels.num; i++) {
|
|
||||||
c = priv->channels.c[i];
|
|
||||||
for (tc = 0; tc < c->num_tc; tc++) {
|
|
||||||
sq = &c->sq[tc];
|
|
||||||
priv->txq2sq[sq->txq_ix] = sq;
|
priv->txq2sq[sq->txq_ix] = sq;
|
||||||
|
priv->channel_tc2realtxq[i][tc] = i + tc * ch;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2910,7 +2904,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
|
||||||
netif_set_real_num_tx_queues(netdev, num_txqs);
|
netif_set_real_num_tx_queues(netdev, num_txqs);
|
||||||
netif_set_real_num_rx_queues(netdev, num_rxqs);
|
netif_set_real_num_rx_queues(netdev, num_rxqs);
|
||||||
|
|
||||||
mlx5e_build_tx2sq_maps(priv);
|
mlx5e_build_txq_maps(priv);
|
||||||
mlx5e_activate_channels(&priv->channels);
|
mlx5e_activate_channels(&priv->channels);
|
||||||
mlx5e_xdp_tx_enable(priv);
|
mlx5e_xdp_tx_enable(priv);
|
||||||
netif_tx_start_all_queues(priv->netdev);
|
netif_tx_start_all_queues(priv->netdev);
|
||||||
|
@ -5021,7 +5015,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
|
||||||
if (err)
|
if (err)
|
||||||
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
|
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
|
||||||
mlx5e_build_nic_netdev(netdev);
|
mlx5e_build_nic_netdev(netdev);
|
||||||
mlx5e_build_tc2txq_maps(priv);
|
|
||||||
mlx5e_health_create_reporters(priv);
|
mlx5e_health_create_reporters(priv);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
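Annotation (not part of the commit): the TXQ fix makes the queue indices exposed to the stack dense by keying the mapping to the number of active channels rather than the compile-time maximum. For example, with ch = priv->channels.num = 4 and num_tc = 2 the stack sees queues 0..7 and

	txq = i + tc * ch;	/* tc 0 -> 0,1,2,3; tc 1 -> 4,5,6,7 */

whereas the old `i + tc * priv->max_nch` left gaps whenever fewer than max_nch channels were active; channel_tc2realtxq records the dense index the selection path must return.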
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1601,7 +1601,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
 					sq_stats_desc[j].format,
-					priv->channel_tc2txq[i][tc]);
+					i + tc * max_nch);
 
 	for (i = 0; i < max_nch; i++) {
 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1626,8 +1626,11 @@ static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
 
 	flow_flag_clear(flow, DUP);
 
-	mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
-	kvfree(flow->peer_flow);
+	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
+		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
+		kfree(flow->peer_flow);
+	}
+
 	flow->peer_flow = NULL;
 }
 
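Annotation (not part of the commit): wrapping the teardown in refcount_dec_and_test() means only the caller that drops the last reference frees the peer flow, preventing a double free when two paths release the same object. The generic idiom, with a hypothetical object type and cleanup hook:

	#include <linux/refcount.h>

	static void obj_put(struct my_obj *obj)	/* my_obj is illustrative */
	{
		if (refcount_dec_and_test(&obj->refcnt)) {
			teardown_hw_state(obj);	/* hypothetical cleanup hook */
			kfree(obj);
		}
		/* every other caller only decremented; the object lives on */
	}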
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -93,7 +93,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 	if (txq_ix >= num_channels)
 		txq_ix = priv->txq2sq[txq_ix]->ch_ix;
 
-	return priv->channel_tc2txq[txq_ix][up];
+	return priv->channel_tc2realtxq[txq_ix][up];
 }
 
 static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -81,7 +81,14 @@ struct vport_ingress {
 		struct mlx5_fc *drop_counter;
 	} legacy;
 	struct {
-		struct mlx5_flow_group *metadata_grp;
+		/* Optional group to add an FTE to do internal priority
+		 * tagging on ingress packets.
+		 */
+		struct mlx5_flow_group *metadata_prio_tag_grp;
+		/* Group to add default match-all FTE entry to tag ingress
+		 * packet with metadata.
+		 */
+		struct mlx5_flow_group *metadata_allmatch_grp;
 		struct mlx5_modify_hdr *modify_metadata;
 		struct mlx5_flow_handle *modify_metadata_rule;
 	} offloads;
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -88,6 +88,14 @@ u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
 	return 1;
 }
 
+static bool
+esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
+				   const struct mlx5_vport *vport)
+{
+	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+		mlx5_eswitch_is_vf_vport(esw, vport->vport));
+}
+
 static void
 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
 				  struct mlx5_flow_spec *spec,
@@ -1760,12 +1768,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
 	 * required, allow
 	 * Unmatched traffic is allowed by default
 	 */
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-	if (!spec) {
-		err = -ENOMEM;
-		goto out_no_mem;
-	}
+	if (!spec)
+		return -ENOMEM;
 
 	/* Untagged packets - push prio tag VLAN, allow */
 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
@@ -1791,14 +1796,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
 			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
 			 vport->vport, err);
 		vport->ingress.allow_rule = NULL;
-		goto out;
 	}
 
-out:
 	kvfree(spec);
-out_no_mem:
-	if (err)
-		esw_vport_cleanup_ingress_rules(esw, vport);
 	return err;
 }
 
@@ -1836,13 +1836,9 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
 		esw_warn(esw->dev,
 			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
 			 vport->vport, err);
+		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
 		vport->ingress.offloads.modify_metadata_rule = NULL;
-		goto out;
 	}
-
-out:
-	if (err)
-		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
 	return err;
 }
 
@@ -1862,50 +1858,103 @@ static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_flow_group *g;
+	void *match_criteria;
 	u32 *flow_group_in;
+	u32 flow_index = 0;
 	int ret = 0;
 
 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
 	if (!flow_group_in)
 		return -ENOMEM;
 
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
-
-	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
-	if (IS_ERR(g)) {
-		ret = PTR_ERR(g);
-		esw_warn(esw->dev,
-			 "Failed to create vport[%d] ingress metadata group, err(%d)\n",
-			 vport->vport, ret);
-		goto grp_err;
+	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
+		/* This group is to hold FTE to match untagged packets when prio_tag
+		 * is enabled.
+		 */
+		memset(flow_group_in, 0, inlen);
+
+		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+					      flow_group_in, match_criteria);
+		MLX5_SET(create_flow_group_in, flow_group_in,
+			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+		if (IS_ERR(g)) {
+			ret = PTR_ERR(g);
+			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
+				 vport->vport, ret);
+			goto prio_tag_err;
+		}
+		vport->ingress.offloads.metadata_prio_tag_grp = g;
+		flow_index++;
+	}
+
+	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+		/* This group holds an FTE with no matches for add metadata for
+		 * tagged packets, if prio-tag is enabled (as a fallthrough),
+		 * or all traffic in case prio-tag is disabled.
+		 */
+		memset(flow_group_in, 0, inlen);
+		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+		if (IS_ERR(g)) {
+			ret = PTR_ERR(g);
+			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+				 vport->vport, ret);
+			goto metadata_err;
+		}
+		vport->ingress.offloads.metadata_allmatch_grp = g;
 	}
-	vport->ingress.offloads.metadata_grp = g;
-grp_err:
+
+	kvfree(flow_group_in);
+	return 0;
+
+metadata_err:
+	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
+		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
+	}
+prio_tag_err:
 	kvfree(flow_group_in);
 	return ret;
 }
 
 static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
 {
-	if (vport->ingress.offloads.metadata_grp) {
-		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
-		vport->ingress.offloads.metadata_grp = NULL;
+	if (vport->ingress.offloads.metadata_allmatch_grp) {
+		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
+		vport->ingress.offloads.metadata_allmatch_grp = NULL;
+	}
+
+	if (vport->ingress.offloads.metadata_prio_tag_grp) {
+		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
 	}
 }
 
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 				    struct mlx5_vport *vport)
 {
+	int num_ftes = 0;
 	int err;
 
 	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
-	    !MLX5_CAP_GEN(esw->dev, prio_tag_required))
+	    !esw_check_ingress_prio_tag_enabled(esw, vport))
 		return 0;
 
 	esw_vport_cleanup_ingress_rules(esw, vport);
-	err = esw_vport_create_ingress_acl_table(esw, vport, 1);
+
+	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+		num_ftes++;
+	if (esw_check_ingress_prio_tag_enabled(esw, vport))
+		num_ftes++;
+
+	err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
 	if (err) {
 		esw_warn(esw->dev,
 			 "failed to enable ingress acl (%d) on vport[%d]\n",
@@ -1926,8 +1975,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 			goto metadata_err;
 	}
 
-	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
-	    mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
+	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
 		err = esw_vport_ingress_prio_tag_config(esw, vport);
 		if (err)
 			goto prio_tag_err;
@@ -1937,7 +1985,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 prio_tag_err:
 	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 metadata_err:
-	esw_vport_cleanup_ingress_rules(esw, vport);
 	esw_vport_destroy_ingress_acl_group(vport);
 group_err:
 	esw_vport_destroy_ingress_acl_table(vport);
@@ -2008,8 +2055,9 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
 	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
 		err = esw_vport_egress_config(esw, vport);
 		if (err) {
-			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 			esw_vport_cleanup_ingress_rules(esw, vport);
+			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+			esw_vport_destroy_ingress_acl_group(vport);
 			esw_vport_destroy_ingress_acl_table(vport);
 		}
 	}
@@ -2021,8 +2069,8 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
 				      struct mlx5_vport *vport)
 {
 	esw_vport_disable_egress_acl(esw, vport);
-	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 	esw_vport_cleanup_ingress_rules(esw, vport);
+	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 	esw_vport_destroy_ingress_acl_group(vport);
 	esw_vport_destroy_ingress_acl_table(vport);
 }
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -2149,14 +2149,18 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
 
 static int ocelot_init_timestamp(struct ocelot *ocelot)
 {
+	struct ptp_clock *ptp_clock;
+
 	ocelot->ptp_info = ocelot_ptp_clock_info;
-	ocelot->ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
-	if (IS_ERR(ocelot->ptp_clock))
-		return PTR_ERR(ocelot->ptp_clock);
+	ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
+	if (IS_ERR(ptp_clock))
+		return PTR_ERR(ptp_clock);
 	/* Check if PHC support is missing at the configuration level */
-	if (!ocelot->ptp_clock)
+	if (!ptp_clock)
 		return 0;
 
+	ocelot->ptp_clock = ptp_clock;
+
 	ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
 	ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
 	ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
@@ -2489,6 +2493,8 @@ void ocelot_deinit(struct ocelot *ocelot)
 	destroy_workqueue(ocelot->stats_queue);
 	mutex_destroy(&ocelot->stats_lock);
 	ocelot_ace_deinit();
+	if (ocelot->ptp_clock)
+		ptp_clock_unregister(ocelot->ptp_clock);
 
 	for (i = 0; i < ocelot->num_phys_ports; i++) {
 		port = ocelot->ports[i];
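Annotation (not part of the commit): ptp_clock_register() has three distinct outcomes, and the fix keeps them separate by writing ocelot->ptp_clock only on full success, so ocelot_deinit() can use a plain NULL check before unregistering. A minimal sketch of the contract:

	struct ptp_clock *clk;

	clk = ptp_clock_register(&info, dev);
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* genuine registration failure */
	if (!clk)
		return 0;		/* PHC support compiled out: not an error */
	priv->ptp_clock = clk;		/* non-NULL means safe to unregister later */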
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -817,8 +817,6 @@ static int lpc_mii_init(struct netdata_local *pldat)
 	pldat->mii_bus->priv = pldat;
 	pldat->mii_bus->parent = &pldat->pdev->dev;
 
-	platform_set_drvdata(pldat->pdev, pldat->mii_bus);
-
 	node = of_get_child_by_name(pldat->pdev->dev.of_node, "mdio");
 	err = of_mdiobus_register(pldat->mii_bus, node);
 	of_node_put(node);
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1381,12 +1381,9 @@ int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
 
 static int ionic_lif_rss_init(struct ionic_lif *lif)
 {
-	u8 rss_key[IONIC_RSS_HASH_KEY_SIZE];
 	unsigned int tbl_sz;
 	unsigned int i;
 
-	netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE);
-
 	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
 			 IONIC_RSS_TYPE_IPV4_TCP |
 			 IONIC_RSS_TYPE_IPV4_UDP |
@@ -1399,12 +1396,18 @@ static int ionic_lif_rss_init(struct ionic_lif *lif)
 	for (i = 0; i < tbl_sz; i++)
 		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
 
-	return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL);
+	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
 }
 
-static int ionic_lif_rss_deinit(struct ionic_lif *lif)
+static void ionic_lif_rss_deinit(struct ionic_lif *lif)
 {
-	return ionic_lif_rss_config(lif, 0x0, NULL, NULL);
+	int tbl_sz;
+
+	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
+	memset(lif->rss_ind_tbl, 0, tbl_sz);
+	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
+
+	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
 }
 
 static void ionic_txrx_disable(struct ionic_lif *lif)
@@ -1729,6 +1732,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
 		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
 		goto err_out_free_qcqs;
 	}
+	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
 
 	list_add_tail(&lif->list, &ionic->lifs);
 
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3695,7 +3695,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
 	case RTL_GIGA_MAC_VER_32:
 	case RTL_GIGA_MAC_VER_33:
 	case RTL_GIGA_MAC_VER_34:
-	case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_52:
+	case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_61:
 		RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
 		break;
@@ -3896,7 +3896,7 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
 	case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
 		r8168dp_hw_jumbo_disable(tp);
 		break;
-	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
+	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
 		r8168e_hw_jumbo_disable(tp);
 		break;
 	default:
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2009,6 +2009,8 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 	tx_q->cur_tx = 0;
 	tx_q->mss = 0;
 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+			    tx_q->dma_tx_phy, chan);
 	stmmac_start_tx_dma(priv, chan);
 
 	priv->dev->stats.tx_errors++;
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -62,7 +62,7 @@ config TI_CPSW
 config TI_CPSW_SWITCHDEV
 	tristate "TI CPSW Switch Support with switchdev"
 	depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
-	select NET_SWITCHDEV
+	depends on NET_SWITCHDEV
 	select TI_DAVINCI_MDIO
 	select MFD_SYSCON
 	select REGMAP
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -100,8 +100,8 @@ irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
 {
 	struct cpsw_common *cpsw = dev_id;
 
-	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
 	writel(0, &cpsw->wr_regs->rx_en);
+	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
 
 	if (cpsw->quirk_irq) {
 		disable_irq_nosync(cpsw->irqs_table[0]);
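Annotation (not part of the commit): the reorder masks the RX interrupt before writing end-of-interrupt, so the controller cannot raise another, spurious interrupt in the window between the EOI and the mask:

	writel(0, &cpsw->wr_regs->rx_en);		/* 1. mask further RX interrupts */
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);	/* 2. then acknowledge */

With the old order, a packet arriving between the two writes could retrigger the line even though NAPI was already scheduled to poll, which is the "extra rx interrupt" named in the pull summary.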
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -853,7 +853,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
 		if (dst)
 			return dst;
 	}
-	if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
+	dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6,
+					      NULL);
+	if (IS_ERR(dst)) {
 		netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
 		return ERR_PTR(-ENETUNREACH);
 	}
@ -101,8 +101,11 @@
|
||||||
/* RGMIIDCTL bits */
|
/* RGMIIDCTL bits */
|
||||||
#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
|
#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
|
||||||
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
|
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
|
||||||
|
#define DP83867_RGMII_TX_CLK_DELAY_INV (DP83867_RGMII_TX_CLK_DELAY_MAX + 1)
|
||||||
#define DP83867_RGMII_RX_CLK_DELAY_MAX 0xf
|
#define DP83867_RGMII_RX_CLK_DELAY_MAX 0xf
|
||||||
#define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0
|
#define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0
|
||||||
|
#define DP83867_RGMII_RX_CLK_DELAY_INV (DP83867_RGMII_RX_CLK_DELAY_MAX + 1)
|
||||||
|
|
||||||
|
|
||||||
/* IO_MUX_CFG bits */
|
/* IO_MUX_CFG bits */
|
||||||
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f
|
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f
|
||||||
|
@ -294,6 +297,48 @@ static int dp83867_config_port_mirroring(struct phy_device *phydev)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int dp83867_verify_rgmii_cfg(struct phy_device *phydev)
|
||||||
|
{
|
||||||
|
struct dp83867_private *dp83867 = phydev->priv;
|
||||||
|
|
||||||
|
/* Existing behavior was to use default pin strapping delay in rgmii
|
||||||
|
* mode, but rgmii should have meant no delay. Warn existing users.
|
||||||
|
*/
|
||||||
|
if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
|
||||||
|
const u16 val = phy_read_mmd(phydev, DP83867_DEVADDR,
|
||||||
|
DP83867_STRAP_STS2);
|
||||||
|
const u16 txskew = (val & DP83867_STRAP_STS2_CLK_SKEW_TX_MASK) >>
|
||||||
|
DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT;
|
||||||
|
const u16 rxskew = (val & DP83867_STRAP_STS2_CLK_SKEW_RX_MASK) >>
|
||||||
|
DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT;
|
||||||
|
|
||||||
|
if (txskew != DP83867_STRAP_STS2_CLK_SKEW_NONE ||
|
||||||
|
rxskew != DP83867_STRAP_STS2_CLK_SKEW_NONE)
|
||||||
|
phydev_warn(phydev,
|
||||||
|
"PHY has delays via pin strapping, but phy-mode = 'rgmii'\n"
|
||||||
|
"Should be 'rgmii-id' to use internal delays txskew:%x rxskew:%x\n",
|
||||||
|
txskew, rxskew);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* RX delay *must* be specified if internal delay of RX is used. */
|
||||||
|
if ((phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
|
||||||
|
phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) &&
|
||||||
|
dp83867->rx_id_delay == DP83867_RGMII_RX_CLK_DELAY_INV) {
|
||||||
|
phydev_err(phydev, "ti,rx-internal-delay must be specified\n");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* TX delay *must* be specified if internal delay of TX is used. */
|
||||||
|
if ((phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
|
||||||
|
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) &&
|
||||||
|
dp83867->tx_id_delay == DP83867_RGMII_TX_CLK_DELAY_INV) {
|
||||||
|
phydev_err(phydev, "ti,tx-internal-delay must be specified\n");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_OF_MDIO
|
#ifdef CONFIG_OF_MDIO
|
||||||
static int dp83867_of_init(struct phy_device *phydev)
|
static int dp83867_of_init(struct phy_device *phydev)
|
||||||
{
|
{
|
||||||
|
@ -335,55 +380,25 @@ static int dp83867_of_init(struct phy_device *phydev)
|
||||||
dp83867->sgmii_ref_clk_en = of_property_read_bool(of_node,
|
dp83867->sgmii_ref_clk_en = of_property_read_bool(of_node,
|
||||||
"ti,sgmii-ref-clock-output-enable");
|
"ti,sgmii-ref-clock-output-enable");
|
||||||
|
|
||||||
/* Existing behavior was to use default pin strapping delay in rgmii
|
|
||||||
* mode, but rgmii should have meant no delay. Warn existing users.
|
|
||||||
*/
|
|
||||||
if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
|
|
||||||
const u16 val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2);
|
|
||||||
const u16 txskew = (val & DP83867_STRAP_STS2_CLK_SKEW_TX_MASK) >>
|
|
||||||
DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT;
|
|
||||||
const u16 rxskew = (val & DP83867_STRAP_STS2_CLK_SKEW_RX_MASK) >>
|
|
||||||
DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT;
|
|
||||||
|
|
||||||
if (txskew != DP83867_STRAP_STS2_CLK_SKEW_NONE ||
|
dp83867->rx_id_delay = DP83867_RGMII_RX_CLK_DELAY_INV;
|
||||||
rxskew != DP83867_STRAP_STS2_CLK_SKEW_NONE)
|
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
|
||||||
phydev_warn(phydev,
|
&dp83867->rx_id_delay);
|
||||||
"PHY has delays via pin strapping, but phy-mode = 'rgmii'\n"
|
if (!ret && dp83867->rx_id_delay > DP83867_RGMII_RX_CLK_DELAY_MAX) {
|
||||||
"Should be 'rgmii-id' to use internal delays\n");
|
phydev_err(phydev,
|
||||||
|
"ti,rx-internal-delay value of %u out of range\n",
|
||||||
|
dp83867->rx_id_delay);
|
||||||
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* RX delay *must* be specified if internal delay of RX is used. */
|
dp83867->tx_id_delay = DP83867_RGMII_TX_CLK_DELAY_INV;
|
||||||
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
|
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
|
||||||
phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
|
&dp83867->tx_id_delay);
|
||||||
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
|
if (!ret && dp83867->tx_id_delay > DP83867_RGMII_TX_CLK_DELAY_MAX) {
|
||||||
&dp83867->rx_id_delay);
|
phydev_err(phydev,
|
||||||
if (ret) {
|
"ti,tx-internal-delay value of %u out of range\n",
|
||||||
phydev_err(phydev, "ti,rx-internal-delay must be specified\n");
|
dp83867->tx_id_delay);
|
||||||
return ret;
|
return -EINVAL;
|
||||||
}
|
|
||||||
if (dp83867->rx_id_delay > DP83867_RGMII_RX_CLK_DELAY_MAX) {
|
|
||||||
phydev_err(phydev,
|
|
||||||
"ti,rx-internal-delay value of %u out of range\n",
|
|
||||||
dp83867->rx_id_delay);
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* TX delay *must* be specified if internal delay of RX is used. */
|
|
||||||
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
|
|
||||||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
|
|
||||||
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
|
|
||||||
&dp83867->tx_id_delay);
|
|
||||||
if (ret) {
|
|
||||||
phydev_err(phydev, "ti,tx-internal-delay must be specified\n");
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
if (dp83867->tx_id_delay > DP83867_RGMII_TX_CLK_DELAY_MAX) {
|
|
||||||
phydev_err(phydev,
|
|
||||||
"ti,tx-internal-delay value of %u out of range\n",
|
|
||||||
dp83867->tx_id_delay);
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (of_property_read_bool(of_node, "enet-phy-lane-swap"))
|
if (of_property_read_bool(of_node, "enet-phy-lane-swap"))
|
||||||
|
@@ -434,6 +449,10 @@ static int dp83867_config_init(struct phy_device *phydev)
 	int ret, val, bs;
 	u16 delay;
 
+	ret = dp83867_verify_rgmii_cfg(phydev);
+	if (ret)
+		return ret;
+
 	/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
 	if (dp83867->rxctrl_strap_quirk)
 		phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
@@ -485,8 +504,12 @@ static int dp83867_config_init(struct phy_device *phydev)
 
 	phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val);
 
-	delay = (dp83867->rx_id_delay |
-		(dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
+	delay = 0;
+	if (dp83867->rx_id_delay != DP83867_RGMII_RX_CLK_DELAY_INV)
+		delay |= dp83867->rx_id_delay;
+	if (dp83867->tx_id_delay != DP83867_RGMII_TX_CLK_DELAY_INV)
+		delay |= dp83867->tx_id_delay <<
+			 DP83867_RGMII_TX_CLK_DELAY_SHIFT;
 
 	phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
 		      delay);
@@ -129,6 +129,7 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
 		mdiobus_free(bus->mii_bus);
 		oct_mdio_writeq(0, bus->register_base + SMI_EN);
 	}
+	pci_release_regions(pdev);
 	pci_set_drvdata(pdev, NULL);
 }
@@ -1754,6 +1754,10 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
 			break;
 		}
 
+		err = sfp_hwmon_insert(sfp);
+		if (err)
+			dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+
 		sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
 		/* fall through */
 	case SFP_MOD_WAITDEV:
@@ -1803,15 +1807,6 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
 	case SFP_MOD_ERROR:
 		break;
 	}
-
-#if IS_ENABLED(CONFIG_HWMON)
-	if (sfp->sm_mod_state >= SFP_MOD_WAITDEV &&
-	    IS_ERR_OR_NULL(sfp->hwmon_dev)) {
-		err = sfp_hwmon_insert(sfp);
-		if (err)
-			dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
-	}
-#endif
 }
 
 static void sfp_sm_main(struct sfp *sfp, unsigned int event)
@@ -2294,6 +2289,10 @@ static int sfp_remove(struct platform_device *pdev)
 
 	sfp_unregister_socket(sfp->sfp_bus);
 
+	rtnl_lock();
+	sfp_sm_event(sfp, SFP_E_REMOVE);
+	rtnl_unlock();
+
 	return 0;
 }
@@ -564,8 +564,9 @@ static struct bpf_prog *get_filter(struct sock_fprog *uprog)
 		return NULL;
 
 	/* uprog->len is unsigned short, so no overflow here */
-	fprog.len = uprog->len * sizeof(struct sock_filter);
-	fprog.filter = memdup_user(uprog->filter, fprog.len);
+	fprog.len = uprog->len;
+	fprog.filter = memdup_user(uprog->filter,
+				   uprog->len * sizeof(struct sock_filter));
 	if (IS_ERR(fprog.filter))
 		return ERR_CAST(fprog.filter);
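The bug being fixed above is a units mix-up: sock_fprog's len field counts BPF instructions, while memdup_user() wants a byte count. A minimal user-space sketch of the corrected convention (the types mirror the UAPI header; the helper itself is illustrative, not kernel code):

    #include <stdlib.h>
    #include <string.h>
    #include <linux/filter.h>   /* struct sock_filter, struct sock_fprog */

    /* fprog.len stays an instruction count; only the copy scales by the
     * element size, exactly as the fixed get_filter() now does. */
    static struct sock_filter *copy_filter(const struct sock_fprog *uprog)
    {
            size_t bytes = uprog->len * sizeof(struct sock_filter);
            struct sock_filter *insns = malloc(bytes);

            if (insns && uprog->filter)
                    memcpy(insns, uprog->filter, bytes);
            return insns;
    }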
@@ -119,8 +119,6 @@ static inline bool stage_session(__be16 sid)
 
 static inline struct pppoe_net *pppoe_pernet(struct net *net)
 {
-	BUG_ON(!net);
-
 	return net_generic(net, pppoe_net_id);
 }
@@ -2275,7 +2275,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
 	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
 	struct dst_entry *ndst;
 	struct flowi6 fl6;
-	int err;
 
 	if (!sock6)
 		return ERR_PTR(-EIO);
@@ -2298,10 +2297,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
 	fl6.fl6_dport = dport;
 	fl6.fl6_sport = sport;
 
-	err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
-					 sock6->sock->sk,
-					 &ndst, &fl6);
-	if (unlikely(err < 0)) {
+	ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk,
+					       &fl6, NULL);
+	if (unlikely(IS_ERR(ndst))) {
 		netdev_dbg(dev, "no route to %pI6\n", daddr);
 		return ERR_PTR(-ENETUNREACH);
 	}
@@ -480,6 +480,7 @@ struct qeth_card_stats {
 
 	u64 rx_dropped_nomem;
 	u64 rx_dropped_notsupp;
+	u64 rx_dropped_runt;
 
 	/* rtnl_link_stats64 */
 	u64 rx_packets;
@@ -627,6 +628,7 @@ struct qeth_ipato {
 
 struct qeth_channel {
 	struct ccw_device *ccwdev;
+	struct qeth_cmd_buffer *active_cmd;
 	enum qeth_channel_states state;
 	atomic_t irq_pending;
 };
@@ -1037,6 +1039,8 @@ int qeth_do_run_thread(struct qeth_card *, unsigned long);
 void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
 void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
+int qeth_stop_channel(struct qeth_channel *channel);
+
 void qeth_print_status_message(struct qeth_card *);
 int qeth_init_qdio_queues(struct qeth_card *);
 int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
@@ -515,7 +515,9 @@ static int __qeth_issue_next_read(struct qeth_card *card)
 
 	QETH_CARD_TEXT(card, 6, "noirqpnd");
 	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
-	if (rc) {
+	if (!rc) {
+		channel->active_cmd = iob;
+	} else {
 		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
 				 rc, CARD_DEVID(card));
 		atomic_set(&channel->irq_pending, 0);
@@ -986,8 +988,21 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 		QETH_CARD_TEXT(card, 5, "data");
 	}
 
-	if (qeth_intparm_is_iob(intparm))
-		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+	if (intparm == 0) {
+		QETH_CARD_TEXT(card, 5, "irqunsol");
+	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
+		QETH_CARD_TEXT(card, 5, "irqunexp");
+
+		dev_err(&cdev->dev,
+			"Received IRQ with intparm %lx, expected %px\n",
+			intparm, channel->active_cmd);
+		if (channel->active_cmd)
+			qeth_cancel_cmd(channel->active_cmd, -EIO);
+	} else {
+		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
+	}
+
+	channel->active_cmd = NULL;
 
 	rc = qeth_check_irb_error(card, cdev, irb);
 	if (rc) {
@@ -1007,15 +1022,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
 		channel->state = CH_STATE_HALTED;
 
-	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
-		QETH_CARD_TEXT(card, 6, "clrchpar");
-		/* we don't have to handle this further */
-		intparm = 0;
-	}
-	if (intparm == QETH_HALT_CHANNEL_PARM) {
-		QETH_CARD_TEXT(card, 6, "hltchpar");
-		/* we don't have to handle this further */
-		intparm = 0;
+	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
+					  SCSW_FCTL_HALT_FUNC))) {
+		qeth_cancel_cmd(iob, -ECANCELED);
+		iob = NULL;
 	}
 
 	cstat = irb->scsw.cmd.cstat;
@@ -1408,7 +1418,7 @@ static int qeth_clear_channel(struct qeth_card *card,
 
 	QETH_CARD_TEXT(card, 3, "clearch");
 	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
-	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
+	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
 	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
 
 	if (rc)
@@ -1430,7 +1440,7 @@ static int qeth_halt_channel(struct qeth_card *card,
 
 	QETH_CARD_TEXT(card, 3, "haltch");
 	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
-	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
+	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
 	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
 
 	if (rc)
@@ -1444,6 +1454,25 @@ static int qeth_halt_channel(struct qeth_card *card,
 	return 0;
 }
 
+int qeth_stop_channel(struct qeth_channel *channel)
+{
+	struct ccw_device *cdev = channel->ccwdev;
+	int rc;
+
+	rc = ccw_device_set_offline(cdev);
+
+	spin_lock_irq(get_ccwdev_lock(cdev));
+	if (channel->active_cmd) {
+		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
+			channel->active_cmd);
+		channel->active_cmd = NULL;
+	}
+	spin_unlock_irq(get_ccwdev_lock(cdev));
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_stop_channel);
+
 static int qeth_halt_channels(struct qeth_card *card)
 {
 	int rc1 = 0, rc2 = 0, rc3 = 0;
@@ -1746,6 +1775,8 @@ static int qeth_send_control_data(struct qeth_card *card,
 	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
 	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
 				      (addr_t) iob, 0, 0, timeout);
+	if (!rc)
+		channel->active_cmd = iob;
 	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
 	if (rc) {
 		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
@@ -4667,12 +4698,12 @@ EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
 
 static void qeth_determine_capabilities(struct qeth_card *card)
 {
+	struct qeth_channel *channel = &card->data;
+	struct ccw_device *ddev = channel->ccwdev;
 	int rc;
-	struct ccw_device *ddev;
 	int ddev_offline = 0;
 
 	QETH_CARD_TEXT(card, 2, "detcapab");
-	ddev = CARD_DDEV(card);
 	if (!ddev->online) {
 		ddev_offline = 1;
 		rc = ccw_device_set_online(ddev);
@@ -4711,7 +4742,7 @@ static void qeth_determine_capabilities(struct qeth_card *card)
 
 out_offline:
 	if (ddev_offline == 1)
-		ccw_device_set_offline(ddev);
+		qeth_stop_channel(channel);
 out:
 	return;
 }
@@ -4911,9 +4942,9 @@ retry:
 	QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
 			 CARD_DEVID(card));
 	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
-	ccw_device_set_offline(CARD_DDEV(card));
-	ccw_device_set_offline(CARD_WDEV(card));
-	ccw_device_set_offline(CARD_RDEV(card));
+	qeth_stop_channel(&card->data);
+	qeth_stop_channel(&card->write);
+	qeth_stop_channel(&card->read);
 	qdio_free(CARD_DDEV(card));
 	rc = ccw_device_set_online(CARD_RDEV(card));
 	if (rc)
@@ -5028,27 +5059,15 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
 
-static void qeth_create_skb_frag(struct qdio_buffer_element *element,
-				 struct sk_buff *skb, int offset, int data_len)
+static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
 {
-	struct page *page = virt_to_page(element->addr);
+	struct page *page = virt_to_page(data);
 	unsigned int next_frag;
 
-	/* first fill the linear space */
-	if (!skb->len) {
-		unsigned int linear = min(data_len, skb_tailroom(skb));
-
-		skb_put_data(skb, element->addr + offset, linear);
-		data_len -= linear;
-		if (!data_len)
-			return;
-		offset += linear;
-		/* fall through to add page frag for remaining data */
-	}
-
 	next_frag = skb_shinfo(skb)->nr_frags;
 	get_page(page);
-	skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
+	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
+			data_len);
 }
 
 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
@@ -5063,13 +5082,12 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
 {
 	struct qdio_buffer_element *element = *__element;
 	struct qdio_buffer *buffer = qethbuffer->buffer;
+	unsigned int linear_len = 0;
 	int offset = *__offset;
 	bool use_rx_sg = false;
 	unsigned int headroom;
 	struct sk_buff *skb;
 	int skb_len = 0;
-	void *data_ptr;
-	int data_len;
 
 next_packet:
 	/* qeth_hdr must not cross element boundaries */
@@ -5082,29 +5100,41 @@ next_packet:
 	*hdr = element->addr + offset;
 
 	offset += sizeof(struct qeth_hdr);
+	skb = NULL;
 
 	switch ((*hdr)->hdr.l2.id) {
 	case QETH_HEADER_TYPE_LAYER2:
 		skb_len = (*hdr)->hdr.l2.pkt_length;
+		linear_len = ETH_HLEN;
 		headroom = 0;
 		break;
 	case QETH_HEADER_TYPE_LAYER3:
 		skb_len = (*hdr)->hdr.l3.length;
 		if (!IS_LAYER3(card)) {
 			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
-			skb = NULL;
 			goto walk_packet;
 		}
 
+		if ((*hdr)->hdr.l3.flags & QETH_HDR_PASSTHRU) {
+			linear_len = ETH_HLEN;
+			headroom = 0;
+			break;
+		}
+
+		if ((*hdr)->hdr.l3.flags & QETH_HDR_IPV6)
+			linear_len = sizeof(struct ipv6hdr);
+		else
+			linear_len = sizeof(struct iphdr);
 		headroom = ETH_HLEN;
 		break;
 	case QETH_HEADER_TYPE_OSN:
 		skb_len = (*hdr)->hdr.osn.pdu_length;
 		if (!IS_OSN(card)) {
 			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
-			skb = NULL;
 			goto walk_packet;
 		}
 
+		linear_len = skb_len;
 		headroom = sizeof(struct qeth_hdr);
 		break;
 	default:
@@ -5117,8 +5147,10 @@ next_packet:
 		return NULL;
 	}
 
-	if (!skb_len)
-		return NULL;
+	if (skb_len < linear_len) {
+		QETH_CARD_STAT_INC(card, rx_dropped_runt);
+		goto walk_packet;
+	}
 
 	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
 		    ((skb_len >= card->options.rx_sg_cb) &&
@@ -5130,9 +5162,9 @@ next_packet:
 		skb = qethbuffer->rx_skb;
 		qethbuffer->rx_skb = NULL;
 	} else {
-		unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;
-
-		skb = napi_alloc_skb(&card->napi, linear + headroom);
+		if (!use_rx_sg)
+			linear_len = skb_len;
+		skb = napi_alloc_skb(&card->napi, linear_len + headroom);
 	}
 
 	if (!skb)
@@ -5141,18 +5173,32 @@ next_packet:
 	skb_reserve(skb, headroom);
 
 walk_packet:
-	data_ptr = element->addr + offset;
 	while (skb_len) {
-		data_len = min(skb_len, (int)(element->length - offset));
+		int data_len = min(skb_len, (int)(element->length - offset));
+		char *data = element->addr + offset;
 
-		if (skb && data_len) {
-			if (use_rx_sg)
-				qeth_create_skb_frag(element, skb, offset,
-						     data_len);
-			else
-				skb_put_data(skb, data_ptr, data_len);
-		}
 		skb_len -= data_len;
+		offset += data_len;
+
+		/* Extract data from current element: */
+		if (skb && data_len) {
+			if (linear_len) {
+				unsigned int copy_len;
+
+				copy_len = min_t(unsigned int, linear_len,
+						 data_len);
+
+				skb_put_data(skb, data, copy_len);
+				linear_len -= copy_len;
+				data_len -= copy_len;
+				data += copy_len;
+			}
+
+			if (data_len)
+				qeth_create_skb_frag(skb, data, data_len);
+		}
+
+		/* Step forward to next element: */
 		if (skb_len) {
 			if (qeth_is_last_sbale(element)) {
 				QETH_CARD_TEXT(card, 4, "unexeob");
@@ -5166,9 +5212,6 @@ walk_packet:
 			}
 			element++;
 			offset = 0;
-			data_ptr = element->addr;
-		} else {
-			offset += data_len;
 		}
 	}
 
@@ -6268,7 +6311,8 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 			    card->stats.rx_frame_errors +
 			    card->stats.rx_fifo_errors;
 	stats->rx_dropped = card->stats.rx_dropped_nomem +
-			    card->stats.rx_dropped_notsupp;
+			    card->stats.rx_dropped_notsupp +
+			    card->stats.rx_dropped_runt;
 	stats->multicast = card->stats.rx_multicast;
 	stats->rx_length_errors = card->stats.rx_length_errors;
 	stats->rx_frame_errors = card->stats.rx_frame_errors;
@@ -29,20 +29,6 @@ extern unsigned char IPA_PDU_HEADER[];
 #define QETH_TIMEOUT		(10 * HZ)
 #define QETH_IPA_TIMEOUT	(45 * HZ)
 
-#define QETH_CLEAR_CHANNEL_PARM	-10
-#define QETH_HALT_CHANNEL_PARM	-11
-
-static inline bool qeth_intparm_is_iob(unsigned long intparm)
-{
-	switch (intparm) {
-	case QETH_CLEAR_CHANNEL_PARM:
-	case QETH_HALT_CHANNEL_PARM:
-	case 0:
-		return false;
-	}
-	return true;
-}
-
 /*****************************************************************************/
 /* IP Assist related definitions                                             */
 /*****************************************************************************/
@@ -51,6 +51,7 @@ static const struct qeth_stats card_stats[] = {
 	QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page),
 	QETH_CARD_STAT("rx0 dropped, no memory", rx_dropped_nomem),
 	QETH_CARD_STAT("rx0 dropped, bad format", rx_dropped_notsupp),
+	QETH_CARD_STAT("rx0 dropped, runt", rx_dropped_runt),
 };
 
 #define TXQ_STATS_LEN ARRAY_SIZE(txq_stats)
@@ -845,9 +845,9 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
 
 out_remove:
 	qeth_l2_stop_card(card);
-	ccw_device_set_offline(CARD_DDEV(card));
-	ccw_device_set_offline(CARD_WDEV(card));
-	ccw_device_set_offline(CARD_RDEV(card));
+	qeth_stop_channel(&card->data);
+	qeth_stop_channel(&card->write);
+	qeth_stop_channel(&card->read);
 	qdio_free(CARD_DDEV(card));
 
 	mutex_unlock(&card->conf_mutex);
@@ -878,9 +878,9 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
 	rtnl_unlock();
 
 	qeth_l2_stop_card(card);
-	rc  = ccw_device_set_offline(CARD_DDEV(card));
-	rc2 = ccw_device_set_offline(CARD_WDEV(card));
-	rc3 = ccw_device_set_offline(CARD_RDEV(card));
+	rc  = qeth_stop_channel(&card->data);
+	rc2 = qeth_stop_channel(&card->write);
+	rc3 = qeth_stop_channel(&card->read);
 	if (!rc)
 		rc = (rc2) ? rc2 : rc3;
 	if (rc)
@@ -2259,9 +2259,9 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
 	return 0;
 out_remove:
 	qeth_l3_stop_card(card);
-	ccw_device_set_offline(CARD_DDEV(card));
-	ccw_device_set_offline(CARD_WDEV(card));
-	ccw_device_set_offline(CARD_RDEV(card));
+	qeth_stop_channel(&card->data);
+	qeth_stop_channel(&card->write);
+	qeth_stop_channel(&card->read);
 	qdio_free(CARD_DDEV(card));
 
 	mutex_unlock(&card->conf_mutex);
@@ -2297,9 +2297,10 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
 		call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
 		rtnl_unlock();
 	}
-	rc  = ccw_device_set_offline(CARD_DDEV(card));
-	rc2 = ccw_device_set_offline(CARD_WDEV(card));
-	rc3 = ccw_device_set_offline(CARD_RDEV(card));
+
+	rc  = qeth_stop_channel(&card->data);
+	rc2 = qeth_stop_channel(&card->write);
+	rc3 = qeth_stop_channel(&card->read);
 	if (!rc)
 		rc = (rc2) ? rc2 : rc3;
 	if (rc)
@@ -480,7 +480,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 		virtio_transport_deliver_tap_pkt(pkt);
 
 		/* Only accept correctly addressed packets */
-		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
+		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
+		    le64_to_cpu(pkt->hdr.dst_cid) ==
+		    vhost_transport_get_local_cid())
 			virtio_transport_recv_pkt(&vhost_transport, pkt);
 		else
 			virtio_transport_free_pkt(pkt);
@@ -776,8 +776,12 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
 
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
-	set_vm_flush_reset_perms(fp);
-	set_memory_ro((unsigned long)fp, fp->pages);
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+	if (!fp->jited) {
+		set_vm_flush_reset_perms(fp);
+		set_memory_ro((unsigned long)fp, fp->pages);
+	}
+#endif
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
@@ -1881,6 +1881,11 @@ struct net_device {
 	unsigned char		if_port;
 	unsigned char		dma;
 
+	/* Note : dev->mtu is often read without holding a lock.
+	 * Writers usually hold RTNL.
+	 * It is recommended to use READ_ONCE() to annotate the reads,
+	 * and to use WRITE_ONCE() to annotate the writes.
+	 */
 	unsigned int		mtu;
 	unsigned int		min_mtu;
 	unsigned int		max_mtu;
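To make the new comment concrete, a reader/writer pair following this convention would look like the sketch below (illustrative only; dev, skb and new_mtu are assumed to be in scope):

    /* Reader: any lockless fast path, e.g. validating a frame length. */
    unsigned int mtu = READ_ONCE(dev->mtu);

    if (skb->len > mtu)
            return -EMSGSIZE;

    /* Writer: holds RTNL, as __dev_set_mtu() now does further down
     * in this same pull. */
    WRITE_ONCE(dev->mtu, new_mtu);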
@@ -3529,8 +3529,9 @@ int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
 int skb_vlan_pop(struct sk_buff *skb);
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
-		  int mac_len);
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
+		  int mac_len, bool ethernet);
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
+		 bool ethernet);
 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
 int skb_mpls_dec_ttl(struct sk_buff *skb);
 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
@@ -97,4 +97,17 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its)
  */
 #define time_after32(a, b)	((s32)((u32)(b) - (u32)(a)) < 0)
 #define time_before32(b, a)	time_after32(a, b)
+
+/**
+ * time_between32 - check if a 32-bit timestamp is within a given time range
+ * @t:	the time which may be within [l,h]
+ * @l:	the lower bound of the range
+ * @h:	the higher bound of the range
+ *
+ * time_before32(t, l, h) returns true if @l <= @t <= @h. All operands are
+ * treated as 32-bit integers.
+ *
+ * Equivalent to !(time_before32(@t, @l) || time_after32(@t, @h)).
+ */
+#define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))
 #endif
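The macro is wraparound-safe because both the range width and the offset of @t are reduced modulo 2^32 relative to @l. A stand-alone user-space sketch (illustrative, not from the patch) that mirrors the expression and exercises a range crossing the 32-bit boundary:

    #include <assert.h>
    #include <stdint.h>

    /* Same expression as time_between32(), with u32 spelled out. */
    #define between32(t, l, h) \
            ((uint32_t)(h) - (uint32_t)(l) >= (uint32_t)(t) - (uint32_t)(l))

    int main(void)
    {
            assert(between32(5, 0, 10));                        /* plain case */
            assert(between32(0x00000008u, 0xfffffff0u, 0x10u)); /* wrapped range */
            assert(!between32(0x20u, 0xfffffff0u, 0x10u));      /* outside */
            return 0;
    }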
@@ -235,6 +235,7 @@ enum flow_dissector_key_id {
 	FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
 	FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
 	FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
+	FLOW_DISSECTOR_KEY_PORTS_RANGE, /* struct flow_dissector_key_ports */
 	FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */
 	FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
 	FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */
@@ -380,19 +380,18 @@ static inline void flow_block_init(struct flow_block *flow_block)
 typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
 				      enum tc_setup_type type, void *type_data);
 
-typedef void flow_indr_block_ing_cmd_t(struct net_device *dev,
-				       flow_indr_block_bind_cb_t *cb,
-				       void *cb_priv,
-				       enum flow_block_command command);
+typedef void flow_indr_block_cmd_t(struct net_device *dev,
+				   flow_indr_block_bind_cb_t *cb, void *cb_priv,
+				   enum flow_block_command command);
 
-struct flow_indr_block_ing_entry {
-	flow_indr_block_ing_cmd_t *cb;
+struct flow_indr_block_entry {
+	flow_indr_block_cmd_t *cb;
 	struct list_head list;
 };
 
-void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+void flow_indr_add_block_cb(struct flow_indr_block_entry *entry);
 
-void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+void flow_indr_del_block_cb(struct flow_indr_block_entry *entry);
 
 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
 				  flow_indr_block_bind_cb_t *cb,
@@ -760,4 +760,9 @@ int ip_misc_proc_init(void);
 int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
 				struct netlink_ext_ack *extack);
 
+static inline bool inetdev_valid_mtu(unsigned int mtu)
+{
+	return likely(mtu >= IPV4_MIN_MTU);
+}
+
 #endif	/* _IP_H */
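Moving the helper into ip.h lets callers outside net/ipv4/devinet.c (where the old copy is removed later in this diff) reject a too-small MTU before building headers around it. A hypothetical call site, shown only as a sketch:

    /* Sketch only: bail out of IPv4 output early if the device MTU fell
     * below the protocol minimum (IPV4_MIN_MTU, 68 bytes). */
    if (!inetdev_valid_mtu(READ_ONCE(dev->mtu)))
            return -EINVAL;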
@@ -1022,7 +1022,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
 
 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
 		   struct flowi6 *fl6);
-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
 				      const struct in6_addr *final_dst);
 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 					 const struct in6_addr *final_dst,
@@ -24,8 +24,10 @@ struct ipv6_stub {
 				 const struct in6_addr *addr);
 	int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
 				 const struct in6_addr *addr);
-	int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
-			       struct dst_entry **dst, struct flowi6 *fl6);
+	struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
+						  const struct sock *sk,
+						  struct flowi6 *fl6,
+						  const struct in6_addr *final_dst);
 	int (*ipv6_route_input)(struct sk_buff *skb);
 
 	struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
@@ -494,15 +494,16 @@ static inline void tcp_synq_overflow(const struct sock *sk)
 		reuse = rcu_dereference(sk->sk_reuseport_cb);
 		if (likely(reuse)) {
 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
-			if (time_after32(now, last_overflow + HZ))
+			if (!time_between32(now, last_overflow,
+					    last_overflow + HZ))
 				WRITE_ONCE(reuse->synq_overflow_ts, now);
 			return;
 		}
 	}
 
-	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-	if (time_after32(now, last_overflow + HZ))
-		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
+	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
+	if (!time_between32(now, last_overflow, last_overflow + HZ))
+		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */
@@ -517,13 +518,23 @@ static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 		reuse = rcu_dereference(sk->sk_reuseport_cb);
 		if (likely(reuse)) {
 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
-			return time_after32(now, last_overflow +
-						 TCP_SYNCOOKIE_VALID);
+			return !time_between32(now, last_overflow - HZ,
+					       last_overflow +
+					       TCP_SYNCOOKIE_VALID);
 		}
 	}
 
-	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-	return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
+	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
+
+	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
+	 * then we're under synflood. However, we have to use
+	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
+	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
+	 * jiffies but before we store .ts_recent_stamp into last_overflow,
+	 * which could lead to rejecting a valid syncookie.
+	 */
+	return !time_between32(now, last_overflow - HZ,
+			       last_overflow + TCP_SYNCOOKIE_VALID);
 }
 
 static inline u32 tcp_cookie_time(void)
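The 'last_overflow - HZ' lower bound exists because of the interleaving described in the new comment; restated as a timeline (illustrative, not part of the patch):

    /*
     * CPU A: tcp_synq_no_recent_overflow()     CPU B: tcp_synq_overflow()
     *
     * now = jiffies;               // e.g. 1000
     *                                           // jiffies advances to 1001
     *                                           WRITE_ONCE(ts_recent_stamp, 1001);
     * last_overflow = READ_ONCE(ts_recent_stamp);   // reads 1001 > now
     *
     * With a plain [last_overflow, last_overflow + TCP_SYNCOOKIE_VALID]
     * window, now < last_overflow would reject a valid cookie; starting
     * the window one HZ earlier absorbs that skew:
     */
    return !time_between32(now, last_overflow - HZ,
                           last_overflow + TCP_SYNCOOKIE_VALID);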
@@ -3463,6 +3463,7 @@ enum {
 	__ctx_convert##_id,
 #include <linux/bpf_types.h>
 #undef BPF_PROG_TYPE
+	__ctx_convert_unused, /* to avoid empty enum in extreme .config */
 };
 static u8 bpf_ctx_convert_map[] = {
 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
@@ -3976,8 +3977,10 @@ static int __get_type_size(struct btf *btf, u32 btf_id,
 	t = btf_type_by_id(btf, btf_id);
 	while (t && btf_type_is_modifier(t))
 		t = btf_type_by_id(btf, t->type);
-	if (!t)
+	if (!t) {
+		*bad_type = btf->types[0];
 		return -EINVAL;
+	}
 	if (btf_type_is_ptr(t))
 		/* kernel size of pointer. Not BPF's size of pointer*/
 		return sizeof(void *);
@@ -9636,7 +9636,10 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 			ret = -EINVAL;
 			goto out;
 		}
-		addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
+		if (subprog == 0)
+			addr = (long) tgt_prog->bpf_func;
+		else
+			addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
 	} else {
 		addr = kallsyms_lookup_name(tname);
 		if (!addr) {
@@ -245,6 +245,12 @@ static int br_set_mac_address(struct net_device *dev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
+	/* dev_set_mac_addr() can be called by a master device on bridge's
+	 * NETDEV_UNREGISTER, but since it's being destroyed do nothing
+	 */
+	if (dev->reg_state != NETREG_REGISTERED)
+		return -EBUSY;
+
 	spin_lock_bh(&br->lock);
 	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
 		/* Mac address will be changed in br_stp_change_bridge_id(). */
@@ -8188,7 +8188,8 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
 	if (ops->ndo_change_mtu)
 		return ops->ndo_change_mtu(dev, new_mtu);
 
-	dev->mtu = new_mtu;
+	/* Pairs with all the lockless reads of dev->mtu in the stack */
+	WRITE_ONCE(dev->mtu, new_mtu);
 	return 0;
 }
 EXPORT_SYMBOL(__dev_set_mtu);
@@ -9246,7 +9247,7 @@ int register_netdevice(struct net_device *dev)
 		if (ret) {
 			if (ret > 0)
 				ret = -EIO;
-			goto out;
+			goto err_free_name;
 		}
 	}
 
@@ -9361,12 +9362,12 @@ out:
 	return ret;
 
 err_uninit:
-	if (dev->name_node)
-		netdev_name_node_free(dev->name_node);
 	if (dev->netdev_ops->ndo_uninit)
 		dev->netdev_ops->ndo_uninit(dev);
 	if (dev->priv_destructor)
 		dev->priv_destructor(dev);
+err_free_name:
+	netdev_name_node_free(dev->name_node);
 	goto out;
 }
 EXPORT_SYMBOL(register_netdevice);
@@ -759,6 +759,31 @@ __skb_flow_dissect_tcp(const struct sk_buff *skb,
 	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
 }
 
+static void
+__skb_flow_dissect_ports(const struct sk_buff *skb,
+			 struct flow_dissector *flow_dissector,
+			 void *target_container, void *data, int nhoff,
+			 u8 ip_proto, int hlen)
+{
+	enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX;
+	struct flow_dissector_key_ports *key_ports;
+
+	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
+		dissector_ports = FLOW_DISSECTOR_KEY_PORTS;
+	else if (dissector_uses_key(flow_dissector,
+				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
+		dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE;
+
+	if (dissector_ports == FLOW_DISSECTOR_KEY_MAX)
+		return;
+
+	key_ports = skb_flow_dissector_target(flow_dissector,
+					      dissector_ports,
+					      target_container);
+	key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
+						data, hlen);
+}
+
 static void
 __skb_flow_dissect_ipv4(const struct sk_buff *skb,
 			struct flow_dissector *flow_dissector,
@@ -928,7 +953,6 @@ bool __skb_flow_dissect(const struct net *net,
 	struct flow_dissector_key_control *key_control;
 	struct flow_dissector_key_basic *key_basic;
 	struct flow_dissector_key_addrs *key_addrs;
-	struct flow_dissector_key_ports *key_ports;
 	struct flow_dissector_key_tags *key_tags;
 	struct flow_dissector_key_vlan *key_vlan;
 	struct bpf_prog *attached = NULL;
@@ -945,9 +969,10 @@ bool __skb_flow_dissect(const struct net *net,
 		nhoff = skb_network_offset(skb);
 	hlen = skb_headlen(skb);
 #if IS_ENABLED(CONFIG_NET_DSA)
-	if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
+	if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
+		     proto == htons(ETH_P_XDSA))) {
 		const struct dsa_device_ops *ops;
-		int offset;
+		int offset = 0;
 
 		ops = skb->dev->dsa_ptr->tag_ops;
 		if (ops->flow_dissect &&
@@ -1383,14 +1408,9 @@ ip_proto_again:
 		break;
 	}
 
-	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
-	    !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
-		key_ports = skb_flow_dissector_target(flow_dissector,
-						      FLOW_DISSECTOR_KEY_PORTS,
-						      target_container);
-		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
-							data, hlen);
-	}
+	if (!(key_control->flags & FLOW_DIS_IS_FRAGMENT))
+		__skb_flow_dissect_ports(skb, flow_dissector, target_container,
+					 data, nhoff, ip_proto, hlen);
 
 	/* Process result of IP proto processing */
 	switch (fdret) {
@@ -283,7 +283,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
 }
 EXPORT_SYMBOL(flow_block_cb_setup_simple);
 
-static LIST_HEAD(block_ing_cb_list);
+static LIST_HEAD(block_cb_list);
 
 static struct rhashtable indr_setup_block_ht;
 
@@ -391,20 +391,19 @@ static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
 	kfree(indr_block_cb);
 }
 
-static DEFINE_MUTEX(flow_indr_block_ing_cb_lock);
+static DEFINE_MUTEX(flow_indr_block_cb_lock);
 
-static void flow_block_ing_cmd(struct net_device *dev,
-			       flow_indr_block_bind_cb_t *cb,
-			       void *cb_priv,
-			       enum flow_block_command command)
+static void flow_block_cmd(struct net_device *dev,
+			   flow_indr_block_bind_cb_t *cb, void *cb_priv,
+			   enum flow_block_command command)
 {
-	struct flow_indr_block_ing_entry *entry;
+	struct flow_indr_block_entry *entry;
 
-	mutex_lock(&flow_indr_block_ing_cb_lock);
-	list_for_each_entry(entry, &block_ing_cb_list, list) {
+	mutex_lock(&flow_indr_block_cb_lock);
+	list_for_each_entry(entry, &block_cb_list, list) {
 		entry->cb(dev, cb, cb_priv, command);
 	}
-	mutex_unlock(&flow_indr_block_ing_cb_lock);
+	mutex_unlock(&flow_indr_block_cb_lock);
 }
 
 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
@@ -424,8 +423,8 @@ int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
 	if (err)
 		goto err_dev_put;
 
-	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
-			   FLOW_BLOCK_BIND);
+	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
+		       FLOW_BLOCK_BIND);
 
 	return 0;
 
@@ -464,8 +463,8 @@ void __flow_indr_block_cb_unregister(struct net_device *dev,
 	if (!indr_block_cb)
 		return;
 
-	flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
-			   FLOW_BLOCK_UNBIND);
+	flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
+		       FLOW_BLOCK_UNBIND);
 
 	flow_indr_block_cb_del(indr_block_cb);
 	flow_indr_block_dev_put(indr_dev);
@@ -499,21 +498,21 @@ void flow_indr_block_call(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(flow_indr_block_call);
 
-void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry)
+void flow_indr_add_block_cb(struct flow_indr_block_entry *entry)
 {
-	mutex_lock(&flow_indr_block_ing_cb_lock);
-	list_add_tail(&entry->list, &block_ing_cb_list);
-	mutex_unlock(&flow_indr_block_ing_cb_lock);
+	mutex_lock(&flow_indr_block_cb_lock);
+	list_add_tail(&entry->list, &block_cb_list);
+	mutex_unlock(&flow_indr_block_cb_lock);
}
-EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb);
+EXPORT_SYMBOL_GPL(flow_indr_add_block_cb);
 
-void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry)
+void flow_indr_del_block_cb(struct flow_indr_block_entry *entry)
 {
-	mutex_lock(&flow_indr_block_ing_cb_lock);
+	mutex_lock(&flow_indr_block_cb_lock);
 	list_del(&entry->list);
-	mutex_unlock(&flow_indr_block_ing_cb_lock);
+	mutex_unlock(&flow_indr_block_cb_lock);
 }
-EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb);
+EXPORT_SYMBOL_GPL(flow_indr_del_block_cb);
 
 static int __init init_flow_indr_rhashtable(void)
 {
@@ -230,9 +230,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
 		fl6.daddr = iph6->daddr;
 		fl6.saddr = iph6->saddr;
 
-		err = ipv6_stub->ipv6_dst_lookup(net, skb->sk, &dst, &fl6);
-		if (unlikely(err))
-			goto err;
+		dst = ipv6_stub->ipv6_dst_lookup_flow(net, skb->sk, &fl6, NULL);
 		if (IS_ERR(dst)) {
 			err = PTR_ERR(dst);
 			goto err;
@@ -1459,14 +1459,17 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
 	struct kobject *kobj = &queue->kobj;
 	int error = 0;
 
+	/* Kobject_put later will trigger netdev_queue_release call
+	 * which decreases dev refcount: Take that reference here
+	 */
+	dev_hold(queue->dev);
+
 	kobj->kset = dev->queues_kset;
 	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
 				     "tx-%u", index);
 	if (error)
 		goto err;
 
-	dev_hold(queue->dev);
-
 #ifdef CONFIG_BQL
 	error = sysfs_create_group(kobj, &dql_group);
 	if (error)
@@ -1250,7 +1250,9 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	vf_spoofchk.vf =
 		vf_linkstate.vf =
 		vf_rss_query_en.vf =
-		vf_trust.vf = ivi.vf;
+		vf_trust.vf =
+		node_guid.vf =
+		port_guid.vf = ivi.vf;
 
 	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
 	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
@@ -5484,7 +5484,7 @@ static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
  * Returns 0 on success, -errno otherwise.
  */
 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
-		  int mac_len)
+		  int mac_len, bool ethernet)
 {
 	struct mpls_shim_hdr *lse;
 	int err;
@@ -5515,7 +5515,7 @@ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
 	lse->label_stack_entry = mpls_lse;
 	skb_postpush_rcsum(skb, lse, MPLS_HLEN);
 
-	if (skb->dev && skb->dev->type == ARPHRD_ETHER)
+	if (ethernet)
 		skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
 	skb->protocol = mpls_proto;
 
@@ -5529,12 +5529,14 @@ EXPORT_SYMBOL_GPL(skb_mpls_push);
  * @skb: buffer
  * @next_proto: ethertype of header after popped MPLS header
  * @mac_len: length of the MAC header
+ * @ethernet: flag to indicate if ethernet header is present in packet
  *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len)
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
+		 bool ethernet)
 {
 	int err;
 
@@ -5553,7 +5555,7 @@ int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len)
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, mac_len);
 
-	if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
+	if (ethernet) {
 		struct ethhdr *hdr;
 
 		/* use mpls_hdr() to get ethertype to account for VLANs. */
@@ -80,12 +80,8 @@ static void mem_xa_remove(struct xdp_mem_allocator *xa)
 {
 	trace_mem_disconnect(xa);
 
-	mutex_lock(&mem_id_lock);
-
 	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
 		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
-
-	mutex_unlock(&mem_id_lock);
 }
 
 static void mem_allocator_disconnect(void *allocator)
@@ -93,6 +89,8 @@ static void mem_allocator_disconnect(void *allocator)
 	struct xdp_mem_allocator *xa;
 	struct rhashtable_iter iter;
 
+	mutex_lock(&mem_id_lock);
+
 	rhashtable_walk_enter(mem_id_ht, &iter);
 	do {
 		rhashtable_walk_start(&iter);
@@ -106,6 +104,8 @@ static void mem_allocator_disconnect(void *allocator)
 	} while (xa == ERR_PTR(-EAGAIN));
 	rhashtable_walk_exit(&iter);
 
+	mutex_unlock(&mem_id_lock);
+
 }
 
 static void mem_id_disconnect(int id)
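
An aside on the xdp locking change above: the fix moves mem_id_lock out of the per-entry helper and up into mem_allocator_disconnect(), so the rhashtable walk and the removals happen inside a single critical section. A minimal userspace sketch of that caller-holds-the-lock pattern, using pthreads (all names here are illustrative, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[4] = { 1, 2, 3, 4 };

/* Caller must hold table_lock; the helper no longer takes it,
 * mirroring mem_xa_remove() after the fix. */
static void remove_entry_locked(int idx)
{
	table[idx] = 0;
}

static void disconnect_all(void)
{
	/* One lock acquisition covers the whole walk-and-remove. */
	pthread_mutex_lock(&table_lock);
	for (int i = 0; i < 4; i++)
		if (table[i])
			remove_entry_locked(i);
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	disconnect_all();
	puts("table cleared");
	return 0;
}
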
@@ -210,7 +210,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
 	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
 	rcu_read_unlock();
 
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
 		dst = NULL;
@@ -282,7 +282,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
 
 	/* sk = NULL, but it is safe for now. RST socket required. */
-	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
+	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(skb, dst);
 		ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0, 0);
@@ -912,7 +912,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 	final_p = fl6_update_dst(&fl6, opt, &final);
 
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
 		goto failure;

@@ -227,8 +227,13 @@ static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct hsr_port *master;
 
 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
-	skb->dev = master->dev;
-	hsr_forward_skb(skb, master);
+	if (master) {
+		skb->dev = master->dev;
+		hsr_forward_skb(skb, master);
+	} else {
+		atomic_long_inc(&dev->tx_dropped);
+		dev_kfree_skb_any(skb);
+	}
 	return NETDEV_TX_OK;
 }
 

@@ -1496,11 +1496,6 @@ skip:
 	}
 }
 
-static bool inetdev_valid_mtu(unsigned int mtu)
-{
-	return mtu >= IPV4_MIN_MTU;
-}
-
 static void inetdev_send_gratuitous_arp(struct net_device *dev,
 					struct in_device *in_dev)
 
@@ -127,7 +127,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 		if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
 			return -EINVAL;
 
-		ershdr = (struct erspan_base_hdr *)options;
+		ershdr = (struct erspan_base_hdr *)(skb->data + nhs + hdr_len);
 		tpi->key = cpu_to_be32(get_session_id(ershdr));
 	}
 
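
The erspan fix above is worth dwelling on: pskb_may_pull() may reallocate the skb header, so a pointer computed from the old skb->data must be refetched afterwards. The same hazard exists in plain C with realloc(); a small self-contained illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(8);
	char *field;

	if (!buf)
		return 1;
	strcpy(buf, "hdr");
	field = buf;              /* pointer into the old buffer */

	buf = realloc(buf, 4096); /* may move the allocation... */
	if (!buf)
		return 1;
	field = buf;              /* ...so refetch from the new base, as the
	                           * fix refetches ershdr from skb->data
	                           * after pskb_may_pull() */
	printf("%s\n", field);
	free(buf);
	return 0;
}
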
@@ -1258,15 +1258,18 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
 		cork->addr = ipc->addr;
 	}
 
-	/*
-	 * We steal reference to this route, caller should not release it
-	 */
-	*rtp = NULL;
 	cork->fragsize = ip_sk_use_pmtu(sk) ?
-			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;
+			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
+
+	if (!inetdev_valid_mtu(cork->fragsize))
+		return -ENETUNREACH;
 
 	cork->gso_size = ipc->gso_size;
+
 	cork->dst = &rt->dst;
+	/* We stole this route, caller should not release it. */
+	*rtp = NULL;
+
 	cork->length = 0;
 	cork->ttl = ipc->ttl;
 	cork->tos = ipc->tos;
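
For context on the new check: IPV4_MIN_MTU is 68, the minimum datagram size RFC 791 requires every host to accept, and a cork->fragsize below it makes the fragmentation code misbehave. A standalone sketch of the helper (constant copied from the kernel's include/net/ip.h):

#include <stdbool.h>
#include <stdio.h>

#define IPV4_MIN_MTU 68		/* RFC 791 */

static bool inetdev_valid_mtu(unsigned int mtu)
{
	return mtu >= IPV4_MIN_MTU;
}

int main(void)
{
	unsigned int mtus[] = { 0, 67, 68, 1500 };

	for (int i = 0; i < 4; i++)
		printf("mtu %u -> %s\n", mtus[i],
		       inetdev_valid_mtu(mtus[i]) ? "ok" : "-ENETUNREACH");
	return 0;
}
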
@@ -755,8 +755,9 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 			min_t(unsigned int, eff_sacks,
 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 			      TCPOLEN_SACK_PERBLOCK);
-		size += TCPOLEN_SACK_BASE_ALIGNED +
-			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
+		if (likely(opts->num_sack_blocks))
+			size += TCPOLEN_SACK_BASE_ALIGNED +
+				opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
 	}
 
 	return size;
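
The arithmetic behind that guard: TCP options get at most 40 bytes, and with MD5 (18 bytes, 20 aligned) plus other options in play, eff_sacks can compute to zero blocks; the old code then still charged TCPOLEN_SACK_BASE_ALIGNED for an empty SACK option, overestimating the option size by 4 bytes. A worked example with the kernel's constants:

#include <stdio.h>

#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8

static unsigned int sack_opt_size(unsigned int num_sack_blocks)
{
	unsigned int size = 0;

	/* The fix: charge for the option only if blocks exist. */
	if (num_sack_blocks)
		size += TCPOLEN_SACK_BASE_ALIGNED +
			num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	return size;
}

int main(void)
{
	/* 0 blocks -> 0 bytes; the old code wrongly reported 4. */
	for (unsigned int n = 0; n <= 3; n++)
		printf("%u blocks -> %u bytes\n", n, sack_opt_size(n));
	return 0;
}
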
@@ -434,6 +434,7 @@ void tcp_retransmit_timer(struct sock *sk)
 	struct net *net = sock_net(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock *req;
+	struct sk_buff *skb;
 
 	req = rcu_dereference_protected(tp->fastopen_rsk,
 					lockdep_sock_is_held(sk));
@@ -446,7 +447,12 @@ void tcp_retransmit_timer(struct sock *sk)
 		 */
 		return;
 	}
-	if (!tp->packets_out || WARN_ON_ONCE(tcp_rtx_queue_empty(sk)))
+
+	if (!tp->packets_out)
+		return;
+
+	skb = tcp_rtx_queue_head(sk);
+	if (WARN_ON_ONCE(!skb))
 		return;
 
 	tp->tlp_high_seq = 0;
@@ -480,7 +486,7 @@ void tcp_retransmit_timer(struct sock *sk)
 			goto out;
 		}
 		tcp_enter_loss(sk);
-		tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1);
+		tcp_retransmit_skb(sk, skb, 1);
 		__sk_dst_reset(sk);
 		goto out_reset_timer;
 	}

@@ -129,11 +129,12 @@ int inet6addr_validator_notifier_call_chain(unsigned long val, void *v)
 }
 EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain);
 
-static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
-					struct dst_entry **u2,
-					struct flowi6 *u3)
+static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net,
+							   const struct sock *sk,
+							   struct flowi6 *fl6,
+							   const struct in6_addr *final_dst)
 {
-	return -EAFNOSUPPORT;
+	return ERR_PTR(-EAFNOSUPPORT);
 }
 
 static int eafnosupport_ipv6_route_input(struct sk_buff *skb)
@@ -190,7 +191,7 @@ static int eafnosupport_ip6_del_rt(struct net *net, struct fib6_info *rt)
 }
 
 const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
-	.ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
+	.ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow,
 	.ipv6_route_input  = eafnosupport_ipv6_route_input,
 	.fib6_get_table  = eafnosupport_fib6_get_table,
 	.fib6_table_lookup = eafnosupport_fib6_table_lookup,

@@ -765,7 +765,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
 					 &final);
 		rcu_read_unlock();
 
-		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+		dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 		if (IS_ERR(dst)) {
 			sk->sk_route_caps = 0;
 			sk->sk_err_soft = -PTR_ERR(dst);
@@ -946,7 +946,7 @@ static int ipv6_route_input(struct sk_buff *skb)
 static const struct ipv6_stub ipv6_stub_impl = {
 	.ipv6_sock_mc_join = ipv6_sock_mc_join,
 	.ipv6_sock_mc_drop = ipv6_sock_mc_drop,
-	.ipv6_dst_lookup = ip6_dst_lookup,
+	.ipv6_dst_lookup_flow = ip6_dst_lookup_flow,
 	.ipv6_route_input  = ipv6_route_input,
 	.fib6_get_table  = fib6_get_table,
 	.fib6_table_lookup = fib6_table_lookup,

@@ -85,7 +85,7 @@ int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
 	final_p = fl6_update_dst(&fl6, opt, &final);
 	rcu_read_unlock();
 
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
 		goto out;

@@ -48,7 +48,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
 	fl6->flowi6_uid = sk->sk_uid;
 	security_req_classify_flow(req, flowi6_to_flowi(fl6));
 
-	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
 	if (IS_ERR(dst))
 		return NULL;
 
@@ -103,7 +103,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
 
 	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
 	if (!dst) {
-		dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+		dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
 
 		if (!IS_ERR(dst))
 			ip6_dst_store(sk, dst, NULL, NULL);

@@ -1144,19 +1144,19 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup);
  * It returns a valid dst pointer on success, or a pointer encoded
  * error code.
  */
-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
 				      const struct in6_addr *final_dst)
 {
 	struct dst_entry *dst = NULL;
 	int err;
 
-	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
+	err = ip6_dst_lookup_tail(net, sk, &dst, fl6);
 	if (err)
 		return ERR_PTR(err);
 	if (final_dst)
 		fl6->daddr = *final_dst;
 
-	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+	return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0);
 }
 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
 
@@ -1188,7 +1188,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 	if (dst)
 		return dst;
 
-	dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst);
 	if (connected && !IS_ERR(dst))
 		ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6);
 

@@ -925,7 +925,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
 	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
 		goto out;

@@ -235,7 +235,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 		fl6.flowi6_uid = sk->sk_uid;
 		security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
-		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+		dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 		if (IS_ERR(dst))
 			goto out_free;
 	}

@@ -275,7 +275,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
 		goto failure;
@@ -906,7 +906,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 	 * Underlying function will use this to retrieve the network
 	 * namespace
 	 */
-	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
+	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(buff, dst);
 		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass,

@@ -615,7 +615,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
 	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
 		goto out;

@@ -617,16 +617,15 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net,
 	struct net_device *dev;
 	struct dst_entry *dst;
 	struct flowi6 fl6;
-	int err;
 
 	if (!ipv6_stub)
 		return ERR_PTR(-EAFNOSUPPORT);
 
 	memset(&fl6, 0, sizeof(fl6));
 	memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
-	err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
-	if (err)
-		return ERR_PTR(err);
+	dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
+	if (IS_ERR(dst))
+		return ERR_CAST(dst);
 
 	dev = dst->dev;
 	dev_hold(dev);
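
The conversion above leans on the kernel's ERR_PTR convention: a small negative errno is encoded in the top page of the pointer space, so one return value carries either a valid pointer or an error, and ERR_CAST merely re-types such a value. A userspace approximation of the macros from include/linux/err.h:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int ok)
{
	static int obj;

	return ok ? (void *)&obj : ERR_PTR(-EAFNOSUPPORT);
}

int main(void)
{
	void *p = lookup(0);

	if (IS_ERR(p))
		printf("error %ld\n", PTR_ERR(p));	/* -97 on Linux */
	else
		printf("got %p\n", p);
	return 0;
}
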
@@ -588,7 +588,7 @@ static int nft_offload_netdev_event(struct notifier_block *this,
 	return NOTIFY_DONE;
 }
 
-static struct flow_indr_block_ing_entry block_ing_entry = {
+static struct flow_indr_block_entry block_ing_entry = {
 	.cb = nft_indr_block_cb,
 	.list = LIST_HEAD_INIT(block_ing_entry.list),
 };
@@ -605,13 +605,13 @@ int nft_offload_init(void)
 	if (err < 0)
 		return err;
 
-	flow_indr_add_block_ing_cb(&block_ing_entry);
+	flow_indr_add_block_cb(&block_ing_entry);
 
 	return 0;
 }
 
 void nft_offload_exit(void)
 {
-	flow_indr_del_block_ing_cb(&block_ing_entry);
+	flow_indr_del_block_cb(&block_ing_entry);
 	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
 }

@@ -44,7 +44,8 @@ static int __nci_spi_send(struct nci_spi *nspi, struct sk_buff *skb,
 		t.len = 0;
 	}
 	t.cs_change = cs_change;
-	t.delay_usecs = nspi->xfer_udelay;
+	t.delay.value = nspi->xfer_udelay;
+	t.delay.unit = SPI_DELAY_UNIT_USECS;
 	t.speed_hz = nspi->xfer_speed_hz;
 
 	spi_message_init(&m);
@@ -216,7 +217,8 @@ static struct sk_buff *__nci_spi_read(struct nci_spi *nspi)
 	rx.rx_buf = skb_put(skb, rx_len);
 	rx.len = rx_len;
 	rx.cs_change = 0;
-	rx.delay_usecs = nspi->xfer_udelay;
+	rx.delay.value = nspi->xfer_udelay;
+	rx.delay.unit = SPI_DELAY_UNIT_USECS;
 	rx.speed_hz = nspi->xfer_speed_hz;
 	spi_message_add_tail(&rx, &m);
 

@@ -166,7 +166,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 	int err;
 
 	err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
-			    skb->mac_len);
+			    skb->mac_len,
+			    ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
 	if (err)
 		return err;
 
@@ -179,7 +180,8 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 {
 	int err;
 
-	err = skb_mpls_pop(skb, ethertype, skb->mac_len);
+	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
+			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
 	if (err)
 		return err;
 

@@ -903,6 +903,17 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
 	}
 	err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);
 
+	if (err == NF_ACCEPT &&
+	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
+		if (maniptype == NF_NAT_MANIP_SRC)
+			maniptype = NF_NAT_MANIP_DST;
+		else
+			maniptype = NF_NAT_MANIP_SRC;
+
+		err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
+					 maniptype);
+	}
+
 	/* Mark NAT done if successful and update the flow key. */
 	if (err == NF_ACCEPT)
 		ovs_nat_update_key(key, skb, maniptype);

@@ -329,6 +329,7 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
 			  bool commit)
 {
 #if IS_ENABLED(CONFIG_NF_NAT)
+	int err;
 	enum nf_nat_manip_type maniptype;
 
 	if (!(ct_action & TCA_CT_ACT_NAT))
@@ -359,7 +360,17 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
 		return NF_ACCEPT;
 	}
 
-	return ct_nat_execute(skb, ct, ctinfo, range, maniptype);
+	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
+	if (err == NF_ACCEPT &&
+	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
+		if (maniptype == NF_NAT_MANIP_SRC)
+			maniptype = NF_NAT_MANIP_DST;
+		else
+			maniptype = NF_NAT_MANIP_SRC;
+
+		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
+	}
+	return err;
 #else
 	return NF_ACCEPT;
 #endif

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 /* Copyright (C) 2019 Netronome Systems, Inc. */
 
+#include <linux/if_arp.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -76,12 +77,14 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
 
 	switch (p->tcfm_action) {
 	case TCA_MPLS_ACT_POP:
-		if (skb_mpls_pop(skb, p->tcfm_proto, mac_len))
+		if (skb_mpls_pop(skb, p->tcfm_proto, mac_len,
+				 skb->dev && skb->dev->type == ARPHRD_ETHER))
 			goto drop;
 		break;
 	case TCA_MPLS_ACT_PUSH:
 		new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
-		if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len))
+		if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len,
+				  skb->dev && skb->dev->type == ARPHRD_ETHER))
 			goto drop;
 		break;
 	case TCA_MPLS_ACT_MODIFY:

@@ -626,15 +626,15 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
 static int tcf_block_setup(struct tcf_block *block,
 			   struct flow_block_offload *bo);
 
-static void tc_indr_block_ing_cmd(struct net_device *dev,
-				  struct tcf_block *block,
-				  flow_indr_block_bind_cb_t *cb,
-				  void *cb_priv,
-				  enum flow_block_command command)
+static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
+			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
+			      enum flow_block_command command, bool ingress)
 {
 	struct flow_block_offload bo = {
 		.command	= command,
-		.binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
+		.binder_type	= ingress ?
+				  FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
+				  FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
 		.net		= dev_net(dev),
 		.block_shared	= tcf_block_non_null_shared(block),
 	};
@@ -652,9 +652,10 @@ static void tc_indr_block_ing_cmd(struct net_device *dev,
 	up_write(&block->cb_lock);
 }
 
-static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
+static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
 {
 	const struct Qdisc_class_ops *cops;
+	const struct Qdisc_ops *ops;
 	struct Qdisc *qdisc;
 
 	if (!dev_ingress_queue(dev))
@@ -664,24 +665,37 @@ static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
 	if (!qdisc)
 		return NULL;
 
-	cops = qdisc->ops->cl_ops;
+	ops = qdisc->ops;
+	if (!ops)
+		return NULL;
+
+	if (!ingress && !strcmp("ingress", ops->id))
+		return NULL;
+
+	cops = ops->cl_ops;
 	if (!cops)
 		return NULL;
 
 	if (!cops->tcf_block)
 		return NULL;
 
-	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
+	return cops->tcf_block(qdisc,
+			       ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
+			       NULL);
 }
 
-static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
-					  flow_indr_block_bind_cb_t *cb,
-					  void *cb_priv,
-					  enum flow_block_command command)
+static void tc_indr_block_get_and_cmd(struct net_device *dev,
+				      flow_indr_block_bind_cb_t *cb,
+				      void *cb_priv,
+				      enum flow_block_command command)
 {
-	struct tcf_block *block = tc_dev_ingress_block(dev);
+	struct tcf_block *block;
+
+	block = tc_dev_block(dev, true);
+	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);
 
-	tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
+	block = tc_dev_block(dev, false);
+	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
 }
 
 static void tc_indr_block_call(struct tcf_block *block,
@@ -2721,13 +2735,19 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
 			      struct netlink_ext_ack *extack)
 {
 	const struct tcf_proto_ops *ops;
+	char name[IFNAMSIZ];
 	void *tmplt_priv;
 
 	/* If kind is not set, user did not specify template. */
 	if (!tca[TCA_KIND])
 		return 0;
 
-	ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
+	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
+		return -EINVAL;
+	}
+
+	ops = tcf_proto_lookup_ops(name, true, extack);
 	if (IS_ERR(ops))
 		return PTR_ERR(ops);
 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
@@ -3626,9 +3646,9 @@ static struct pernet_operations tcf_net_ops = {
 	.size = sizeof(struct tcf_net),
 };
 
-static struct flow_indr_block_ing_entry block_ing_entry = {
-	.cb = tc_indr_block_get_and_ing_cmd,
-	.list = LIST_HEAD_INIT(block_ing_entry.list),
+static struct flow_indr_block_entry block_entry = {
+	.cb = tc_indr_block_get_and_cmd,
+	.list = LIST_HEAD_INIT(block_entry.list),
 };
 
 static int __init tc_filter_init(void)
@@ -3643,7 +3663,7 @@ static int __init tc_filter_init(void)
 	if (err)
 		goto err_register_pernet_subsys;
 
-	flow_indr_add_block_ing_cb(&block_ing_entry);
+	flow_indr_add_block_cb(&block_entry);
 
 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
 		      RTNL_FLAG_DOIT_UNLOCKED);

@@ -56,8 +56,13 @@ struct fl_flow_key {
 	struct flow_dissector_key_ip ip;
 	struct flow_dissector_key_ip enc_ip;
 	struct flow_dissector_key_enc_opts enc_opts;
-	struct flow_dissector_key_ports tp_min;
-	struct flow_dissector_key_ports tp_max;
+	union {
+		struct flow_dissector_key_ports tp;
+		struct {
+			struct flow_dissector_key_ports tp_min;
+			struct flow_dissector_key_ports tp_max;
+		};
+	} tp_range;
 	struct flow_dissector_key_ct ct;
 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
 
@@ -200,19 +205,19 @@ static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
 {
 	__be16 min_mask, max_mask, min_val, max_val;
 
-	min_mask = htons(filter->mask->key.tp_min.dst);
-	max_mask = htons(filter->mask->key.tp_max.dst);
-	min_val = htons(filter->key.tp_min.dst);
-	max_val = htons(filter->key.tp_max.dst);
+	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
+	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
+	min_val = htons(filter->key.tp_range.tp_min.dst);
+	max_val = htons(filter->key.tp_range.tp_max.dst);
 
 	if (min_mask && max_mask) {
-		if (htons(key->tp.dst) < min_val ||
-		    htons(key->tp.dst) > max_val)
+		if (htons(key->tp_range.tp.dst) < min_val ||
+		    htons(key->tp_range.tp.dst) > max_val)
 			return false;
 
 		/* skb does not have min and max values */
-		mkey->tp_min.dst = filter->mkey.tp_min.dst;
-		mkey->tp_max.dst = filter->mkey.tp_max.dst;
+		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
+		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
 	}
 	return true;
 }
@@ -223,19 +228,19 @@ static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
 {
 	__be16 min_mask, max_mask, min_val, max_val;
 
-	min_mask = htons(filter->mask->key.tp_min.src);
-	max_mask = htons(filter->mask->key.tp_max.src);
-	min_val = htons(filter->key.tp_min.src);
-	max_val = htons(filter->key.tp_max.src);
+	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
+	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
+	min_val = htons(filter->key.tp_range.tp_min.src);
+	max_val = htons(filter->key.tp_range.tp_max.src);
 
 	if (min_mask && max_mask) {
-		if (htons(key->tp.src) < min_val ||
-		    htons(key->tp.src) > max_val)
+		if (htons(key->tp_range.tp.src) < min_val ||
+		    htons(key->tp_range.tp.src) > max_val)
 			return false;
 
 		/* skb does not have min and max values */
-		mkey->tp_min.src = filter->mkey.tp_min.src;
-		mkey->tp_max.src = filter->mkey.tp_max.src;
+		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
+		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
 	}
 	return true;
 }
@@ -734,23 +739,25 @@ static void fl_set_key_val(struct nlattr **tb,
 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
 				 struct fl_flow_key *mask)
 {
-	fl_set_key_val(tb, &key->tp_min.dst,
-		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
-		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
-	fl_set_key_val(tb, &key->tp_max.dst,
-		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
-		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
-	fl_set_key_val(tb, &key->tp_min.src,
-		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
-		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
-	fl_set_key_val(tb, &key->tp_max.src,
-		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
-		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));
+	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
+		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
+	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
+		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
+	fl_set_key_val(tb, &key->tp_range.tp_min.src,
+		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
+	fl_set_key_val(tb, &key->tp_range.tp_max.src,
+		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
+		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
 
-	if ((mask->tp_min.dst && mask->tp_max.dst &&
-	     htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
-	    (mask->tp_min.src && mask->tp_max.src &&
-	     htons(key->tp_max.src) <= htons(key->tp_min.src)))
+	if ((mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
+	     htons(key->tp_range.tp_max.dst) <=
+	     htons(key->tp_range.tp_min.dst)) ||
+	    (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
+	     htons(key->tp_range.tp_max.src) <=
+	     htons(key->tp_range.tp_min.src)))
 		return -EINVAL;
 
 	return 0;
@@ -1509,9 +1516,10 @@ static void fl_init_dissector(struct flow_dissector *dissector,
 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
-	if (FL_KEY_IS_MASKED(mask, tp) ||
-	    FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
-		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
+	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+			     FLOW_DISSECTOR_KEY_PORTS, tp);
+	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 			     FLOW_DISSECTOR_KEY_IP, ip);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
@@ -1560,8 +1568,10 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
 
 	fl_mask_copy(newmask, mask);
 
-	if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
-	    (newmask->key.tp_min.src && newmask->key.tp_max.src))
+	if ((newmask->key.tp_range.tp_min.dst &&
+	     newmask->key.tp_range.tp_max.dst) ||
+	    (newmask->key.tp_range.tp_min.src &&
+	     newmask->key.tp_range.tp_max.src))
 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
 
 	err = fl_init_mask_hashtable(newmask);
@@ -2159,18 +2169,22 @@ static int fl_dump_key_val(struct sk_buff *skb,
 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
 				  struct fl_flow_key *mask)
 {
-	if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
-			    &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
-			    sizeof(key->tp_min.dst)) ||
-	    fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
-			    &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
-			    sizeof(key->tp_max.dst)) ||
-	    fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
-			    &mask->tp_min.src, TCA_FLOWER_UNSPEC,
-			    sizeof(key->tp_min.src)) ||
-	    fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
-			    &mask->tp_max.src, TCA_FLOWER_UNSPEC,
-			    sizeof(key->tp_max.src)))
+	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
+			    TCA_FLOWER_KEY_PORT_DST_MIN,
+			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_range.tp_min.dst)) ||
+	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
+			    TCA_FLOWER_KEY_PORT_DST_MAX,
+			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_range.tp_max.dst)) ||
+	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
+			    TCA_FLOWER_KEY_PORT_SRC_MIN,
+			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_range.tp_min.src)) ||
+	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
+			    TCA_FLOWER_KEY_PORT_SRC_MAX,
+			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
+			    sizeof(key->tp_range.tp_max.src)))
 		return -1;
 
 	return 0;
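
A note on the tp_range union introduced above: the anonymous struct lets the same bytes be read either as one exact-match ports key (tp) or as a min/max pair, so FLOW_DISSECTOR_KEY_PORTS and FLOW_DISSECTOR_KEY_PORTS_RANGE can address a single slot in the key. A simplified standalone illustration (the ports type here is a stand-in, not the kernel struct):

#include <stdint.h>
#include <stdio.h>

struct key_ports {		/* stand-in for flow_dissector_key_ports */
	uint16_t src;
	uint16_t dst;
};

union tp_range {
	struct key_ports tp;			/* exact-port view */
	struct {
		struct key_ports tp_min;	/* range view: low bound */
		struct key_ports tp_max;	/* range view: high bound */
	};
};

int main(void)
{
	union tp_range r = { 0 };

	r.tp_min.dst = 80;
	/* tp aliases tp_min, so the exact-match view and the range
	 * lower bound share storage, as in struct fl_flow_key. */
	printf("sizeof=%zu tp.dst=%u\n", sizeof(r), r.tp.dst);
	return 0;
}
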
@@ -2184,6 +2184,7 @@ static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
 	[TCA_CAKE_MPU]		 = { .type = NLA_U32 },
 	[TCA_CAKE_INGRESS]	 = { .type = NLA_U32 },
 	[TCA_CAKE_ACK_FILTER]	 = { .type = NLA_U32 },
+	[TCA_CAKE_SPLIT_GSO]	 = { .type = NLA_U32 },
 	[TCA_CAKE_FWMARK]	 = { .type = NLA_U32 },
 };
 

@@ -153,6 +153,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
 			__gnet_stats_copy_queue(&sch->qstats,
 						qdisc->cpu_qstats,
 						&qdisc->qstats, qlen);
+			sch->q.qlen		+= qlen;
 		} else {
 			sch->q.qlen		+= qdisc->q.qlen;
 			sch->bstats.bytes	+= qdisc->bstats.bytes;

@@ -411,6 +411,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 			__gnet_stats_copy_queue(&sch->qstats,
 						qdisc->cpu_qstats,
 						&qdisc->qstats, qlen);
+			sch->q.qlen		+= qlen;
 		} else {
 			sch->q.qlen		+= qdisc->q.qlen;
 			sch->bstats.bytes	+= qdisc->bstats.bytes;
@@ -433,7 +434,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 		opt.offset[tc] = dev->tc_to_txq[tc].offset;
 	}
 
-	if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt))
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
 		goto nla_put_failure;
 
 	if ((priv->flags & TC_MQPRIO_F_MODE) &&

@@ -275,7 +275,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
 	rcu_read_unlock();
 
-	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
 	if (!asoc || saddr)
 		goto out;
 
@@ -328,7 +328,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		fl6->saddr = laddr->a.v6.sin6_addr;
 		fl6->fl6_sport = laddr->a.v6.sin6_port;
 		final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
-		bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
+		bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
 
 		if (IS_ERR(bdst))
 			continue;

@@ -2546,7 +2546,12 @@ static int ____sys_recvmsg(struct socket *sock, struct msghdr *msg_sys,
 
 	if (sock->file->f_flags & O_NONBLOCK)
 		flags |= MSG_DONTWAIT;
-	err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, flags);
+
+	if (unlikely(nosec))
+		err = sock_recvmsg_nosec(sock, msg_sys, flags);
+	else
+		err = sock_recvmsg(sock, msg_sys, flags);
+
 	if (err < 0)
 		goto out;
 	len = err;
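
The recvmsg change above trades a call through a conditionally selected function pointer for two direct calls: with retpolines enabled, an indirect call costs noticeably more than a predictable branch, and the rewritten form lets the compiler emit direct calls on both paths. The two shapes side by side, as a toy sketch (functions are placeholders, not the socket API):

#include <stdio.h>

static int op_fast(int x) { return x + 1; }
static int op_slow(int x) { return x + 2; }

int main(void)
{
	int nosec = 0, err;

	/* Before: one indirect call through a selected pointer. */
	err = (nosec ? op_fast : op_slow)(40);

	/* After: direct calls behind a plain branch, avoiding a
	 * retpoline-penalized indirect call on the hot path. */
	if (nosec)
		err = op_fast(40);
	else
		err = op_slow(40);

	printf("%d\n", err);
	return 0;
}
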
@@ -148,14 +148,6 @@ static int __init tipc_init(void)
 	sysctl_tipc_rmem[1] = RCVBUF_DEF;
 	sysctl_tipc_rmem[2] = RCVBUF_MAX;
 
-	err = tipc_netlink_start();
-	if (err)
-		goto out_netlink;
-
-	err = tipc_netlink_compat_start();
-	if (err)
-		goto out_netlink_compat;
-
 	err = tipc_register_sysctl();
 	if (err)
 		goto out_sysctl;
@@ -180,8 +172,21 @@ static int __init tipc_init(void)
 	if (err)
 		goto out_bearer;
 
+	err = tipc_netlink_start();
+	if (err)
+		goto out_netlink;
+
+	err = tipc_netlink_compat_start();
+	if (err)
+		goto out_netlink_compat;
+
 	pr_info("Started in single node mode\n");
 	return 0;
 
+out_netlink_compat:
+	tipc_netlink_stop();
+out_netlink:
+	tipc_bearer_cleanup();
 out_bearer:
 	unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
 out_register_pernet_subsys:
@@ -193,23 +198,19 @@ out_socket:
 out_pernet:
 	tipc_unregister_sysctl();
 out_sysctl:
-	tipc_netlink_compat_stop();
-out_netlink_compat:
-	tipc_netlink_stop();
-out_netlink:
 	pr_err("Unable to start in single node mode\n");
 	return err;
 }
 
 static void __exit tipc_exit(void)
 {
+	tipc_netlink_compat_stop();
+	tipc_netlink_stop();
 	tipc_bearer_cleanup();
 	unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
 	unregister_pernet_device(&tipc_topsrv_net_ops);
 	tipc_socket_stop();
 	unregister_pernet_device(&tipc_net_ops);
-	tipc_netlink_stop();
-	tipc_netlink_compat_stop();
 	tipc_unregister_sysctl();
 
 	pr_info("Deactivated\n");
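
The tipc reordering restores a standard kernel invariant: teardown runs in the exact reverse order of initialization, and the error path unwinds only the steps that already succeeded. The canonical goto-unwind shape, as a standalone sketch:

#include <stdio.h>

static int start_a(void) { puts("start a"); return 0; }
static int start_b(void) { puts("start b"); return 0; }
static void stop_a(void) { puts("stop a"); }
static void stop_b(void) { puts("stop b"); }

static int init_all(void)
{
	int err;

	err = start_a();
	if (err)
		goto out;
	err = start_b();
	if (err)
		goto out_a;	/* undo only what already succeeded */
	return 0;

out_a:
	stop_a();
out:
	return err;
}

int main(void)
{
	if (init_all())
		return 1;
	/* exit path: strict reverse order of init */
	stop_b();
	stop_a();
	return 0;
}
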
@@ -195,10 +195,13 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
 			.saddr = src->ipv6,
 			.flowi6_proto = IPPROTO_UDP
 		};
-		err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk,
-						 &ndst, &fl6);
-		if (err)
+		ndst = ipv6_stub->ipv6_dst_lookup_flow(net,
+						       ub->ubsock->sk,
+						       &fl6, NULL);
+		if (IS_ERR(ndst)) {
+			err = PTR_ERR(ndst);
 			goto tx_error;
+		}
 		dst_cache_set_ip6(cache, ndst, &fl6.saddr);
 	}
 	ttl = ip6_dst_hoplimit(ndst);

@@ -429,7 +429,7 @@ static int tls_push_data(struct sock *sk,
 
 	if (flags &
 	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (unlikely(sk->sk_err))
 		return -sk->sk_err;
@@ -571,7 +571,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
 	lock_sock(sk);
 
 	if (flags & MSG_OOB) {
-		rc = -ENOTSUPP;
+		rc = -EOPNOTSUPP;
 		goto out;
 	}
 
@@ -1023,7 +1023,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 	}
 
 	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
-		rc = -ENOTSUPP;
+		rc = -EOPNOTSUPP;
 		goto release_netdev;
 	}
 
@@ -1098,7 +1098,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
 	}
 
 	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
-		rc = -ENOTSUPP;
+		rc = -EOPNOTSUPP;
 		goto release_netdev;
 	}
 

@@ -487,7 +487,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
 	/* check version */
 	if (crypto_info->version != TLS_1_2_VERSION &&
 	    crypto_info->version != TLS_1_3_VERSION) {
-		rc = -ENOTSUPP;
+		rc = -EINVAL;
 		goto err_crypto_info;
 	}
 
@@ -714,7 +714,7 @@ static int tls_init(struct sock *sk)
 	 * share the ulp context.
 	 */
 	if (sk->sk_state != TCP_ESTABLISHED)
-		return -ENOTSUPP;
+		return -ENOTCONN;
 
 	/* allocate tls context */
 	write_lock_bh(&sk->sk_callback_lock);

@@ -905,7 +905,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	int ret = 0;
 
 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&tls_ctx->tx_lock);
 	lock_sock(sk);
@@ -1220,7 +1220,7 @@ int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
 		      MSG_NO_SHARED_FRAGS))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	return tls_sw_do_sendpage(sk, page, offset, size, flags);
 }
@@ -1233,7 +1233,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 
 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&tls_ctx->tx_lock);
 	lock_sock(sk);
@@ -1932,7 +1932,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
 
 		/* splice does not support reading control messages */
 		if (ctx->control != TLS_RECORD_TYPE_DATA) {
-			err = -ENOTSUPP;
+			err = -EINVAL;
 			goto splice_read_end;
 		}
 
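
Why the tls errno churn above matters to applications: ENOTSUPP (524) is a kernel-internal code that libc cannot name, so callers saw "Unknown error 524", while EOPNOTSUPP (95) is part of the UAPI and prints properly. A quick demonstration:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define ENOTSUPP 524	/* kernel-internal; absent from <errno.h> */

int main(void)
{
	printf("EOPNOTSUPP: %s\n", strerror(EOPNOTSUPP));
	printf("ENOTSUPP:   %s\n", strerror(ENOTSUPP)); /* Unknown error 524 */
	return 0;
}
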
@@ -489,9 +489,9 @@ int main(int argc, char **argv)
 	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
 		return EXIT_FAIL;
 
-	map = bpf_map__next(NULL, obj);
-	stats_global_map = bpf_map__next(map, obj);
-	rx_queue_index_map = bpf_map__next(stats_global_map, obj);
+	map = bpf_object__find_map_by_name(obj, "config_map");
+	stats_global_map = bpf_object__find_map_by_name(obj, "stats_global_map");
+	rx_queue_index_map = bpf_object__find_map_by_name(obj, "rx_queue_index_map");
 	if (!map || !stats_global_map || !rx_queue_index_map) {
 		printf("finding a map in obj file failed\n");
 		return EXIT_FAIL;

@@ -127,7 +127,9 @@ gen_btf()
 		cut -d, -f1 | cut -d' ' -f2)
 	bin_format=$(LANG=C ${OBJDUMP} -f ${1} | grep 'file format' | \
 		awk '{print $4}')
-	${OBJCOPY} --dump-section .BTF=.btf.vmlinux.bin ${1} 2>/dev/null
+	${OBJCOPY} --change-section-address .BTF=0 \
+		--set-section-flags .BTF=alloc -O binary \
+		--only-section=.BTF ${1} .btf.vmlinux.bin
 	${OBJCOPY} -I binary -O ${bin_format} -B ${bin_arch} \
 		--rename-section .data=.BTF .btf.vmlinux.bin ${2}
 }
@@ -253,6 +255,10 @@ btf_vmlinux_bin_o=""
 if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then
 	if gen_btf .tmp_vmlinux.btf .btf.vmlinux.bin.o ; then
 		btf_vmlinux_bin_o=.btf.vmlinux.bin.o
+	else
+		echo >&2 "Failed to generate BTF for vmlinux"
+		echo >&2 "Try to disable CONFIG_DEBUG_INFO_BTF"
+		exit 1
 	fi
 fi
 
Some files were not shown because too many files have changed in this diff.