Merge branch 'dpaa2-eth-AF_XDP-zc'
Ioana Ciornei says:

====================
net: dpaa2-eth: AF_XDP zero-copy support

This patch set adds support for AF_XDP zero-copy in the dpaa2-eth driver.
The support is available on the LX2160A SoC and its variants, and only on
interfaces (DPNIs) with a maximum of 8 queues (HW limitations are the root
cause).

We are first implementing the .get_channels() callback since this is a
dependency for further work. Patches 2-3 are making the necessary changes
for multiple buffer pools on a single interface. By default, without an
AF_XDP socket attached, only a single buffer pool will be used and shared
between all the queues. The changes in the functions are made in this
patch, but the actual allocation and setup of a new BP is done in patch #10.

Patches 4-5 are improving the information exposed in debugfs. We are
exposing a new file to show which buffer pool is used by which channels and
how many buffers it currently has.

The 6th patch updates the dpni_set_pools() firmware API so that we are
capable of setting up a different buffer pool per queue in later patches.
In the 7th patch the generic dev_open/close APIs are used instead of the
dpaa2-eth internal ones. Patches 8-9 are rearranging the existing code in
dpaa2-eth.c in order to create new functions which will be used in the XSK
implementation in dpaa2-xsk.c.

Finally, the last 3 patches are adding the actual support for both the Rx
and Tx path of AF_XDP zero-copy, plus some associated tracepoints. Details
on the implementation can be found in the actual patches.

Changes in v2:
- 3/12: Export dpaa2_eth_allocate_dpbp/dpaa2_eth_free_dpbp in this patch to
  avoid a build warning. The functions will be used in next patches.
- 6/12: Use __le16 instead of u16 for the dpbp_id field.
- 12/12: Use xdp_buff->data_hard_start when tracing the BP seeding.

Changes in v3:
- 3/12: fix leaking of bp on error path
====================

Acked-by: Björn Töpel <bjorn@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 225480f040
11 changed files with 1089 additions and 237 deletions
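
The series only adds the kernel side. For orientation, below is a minimal sketch of the userspace counterpart that would exercise it: creating a UMEM and binding an AF_XDP socket in zero-copy mode to one queue of the interface. It uses the xsk_* helpers that historically shipped in libbpf's <bpf/xsk.h> (now maintained in libxdp); the interface name, queue id and sizing constants are illustrative, and error handling is trimmed.

	/* Hypothetical AF_XDP zero-copy client sketch; not part of this commit. */
	#include <stdlib.h>
	#include <unistd.h>
	#include <linux/if_xdp.h>
	#include <bpf/xsk.h>

	#define NUM_FRAMES 4096				/* illustrative sizing */
	#define FRAME_SIZE XSK_UMEM__DEFAULT_FRAME_SIZE

	int main(void)
	{
		struct xsk_ring_prod fill_q, tx_q;
		struct xsk_ring_cons comp_q, rx_q;
		struct xsk_socket_config cfg = {
			.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
			.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
			.bind_flags = XDP_ZEROCOPY,	/* fail rather than fall back to copy mode */
		};
		struct xsk_socket *xsk;
		struct xsk_umem *umem;
		void *bufs;

		/* UMEM backing memory must be page aligned */
		if (posix_memalign(&bufs, getpagesize(), (size_t)NUM_FRAMES * FRAME_SIZE))
			return 1;
		if (xsk_umem__create(&umem, bufs, (size_t)NUM_FRAMES * FRAME_SIZE,
				     &fill_q, &comp_q, NULL))
			return 1;

		/* The driver hooks this up through XDP_SETUP_XSK_POOL, see
		 * dpaa2_xsk_setup_pool() in the diff below.
		 */
		if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx_q, &tx_q, &cfg))
			return 1;

		/* ... populate fill_q and run the usual AF_XDP Rx/Tx loop ... */
		xsk_socket__delete(xsk);
		xsk_umem__delete(umem);
		free(bufs);
		return 0;
	}

With XDP_ZEROCOPY in bind_flags the bind fails instead of silently falling back to copy mode, which is how the WRIOP-version and queue-count restrictions rejected in dpaa2_xsk_enable_pool() would surface to the application.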

MAINTAINERS

@@ -6326,6 +6326,7 @@ F: drivers/net/ethernet/freescale/dpaa2/Kconfig
 F:	drivers/net/ethernet/freescale/dpaa2/Makefile
 F:	drivers/net/ethernet/freescale/dpaa2/dpaa2-eth*
 F:	drivers/net/ethernet/freescale/dpaa2/dpaa2-mac*
+F:	drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk*
 F:	drivers/net/ethernet/freescale/dpaa2/dpkg.h
 F:	drivers/net/ethernet/freescale/dpaa2/dpmac*
 F:	drivers/net/ethernet/freescale/dpaa2/dpni*

drivers/net/ethernet/freescale/dpaa2/Makefile

@@ -7,7 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
 obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
 obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o
 
-fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o dpaa2-xsk.o
 fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
 fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
 fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o

drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c

@@ -98,14 +98,14 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
 	int i;
 
 	seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
-	seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
-		   "CHID", "CPU", "Deq busy", "Frames", "CDANs",
+	seq_printf(file, "%s %5s%16s%16s%16s%16s%16s%16s\n",
+		   "IDX", "CHID", "CPU", "Deq busy", "Frames", "CDANs",
 		   "Avg Frm/CDAN", "Buf count");
 
 	for (i = 0; i < priv->num_channels; i++) {
 		ch = priv->channel[i];
-		seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
-			   ch->ch_id,
+		seq_printf(file, "%3s%d%6d%16d%16llu%16llu%16llu%16llu%16d\n",
+			   "CH#", i, ch->ch_id,
 			   ch->nctx.desired_cpu,
 			   ch->stats.dequeue_portal_busy,
 			   ch->stats.frames,
@@ -119,6 +119,51 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
 
 DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch);
 
+static int dpaa2_dbg_bp_show(struct seq_file *file, void *offset)
+{
+	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+	int i, j, num_queues, buf_cnt;
+	struct dpaa2_eth_bp *bp;
+	char ch_name[10];
+	int err;
+
+	/* Print out the header */
+	seq_printf(file, "Buffer pool info for %s:\n", priv->net_dev->name);
+	seq_printf(file, "%s %10s%15s", "IDX", "BPID", "Buf count");
+	num_queues = dpaa2_eth_queue_count(priv);
+	for (i = 0; i < num_queues; i++) {
+		snprintf(ch_name, sizeof(ch_name), "CH#%d", i);
+		seq_printf(file, "%10s", ch_name);
+	}
+	seq_printf(file, "\n");
+
+	/* For each buffer pool, print out its BPID, the number of buffers in
+	 * that buffer pool and the channels which are using it.
+	 */
+	for (i = 0; i < priv->num_bps; i++) {
+		bp = priv->bp[i];
+
+		err = dpaa2_io_query_bp_count(NULL, bp->bpid, &buf_cnt);
+		if (err) {
+			netdev_warn(priv->net_dev, "Buffer count query error %d\n", err);
+			return err;
+		}
+
+		seq_printf(file, "%3s%d%10d%15d", "BP#", i, bp->bpid, buf_cnt);
+		for (j = 0; j < num_queues; j++) {
+			if (priv->channel[j]->bp == bp)
+				seq_printf(file, "%10s", "x");
+			else
+				seq_printf(file, "%10s", "");
+		}
+		seq_printf(file, "\n");
+	}
+
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_bp);
+
 void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
 {
 	struct fsl_mc_device *dpni_dev;
@@ -139,6 +184,10 @@ void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
 
 	/* per-fq stats file */
 	debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops);
+
+	/* per buffer pool stats file */
+	debugfs_create_file("bp_stats", 0444, dir, priv, &dpaa2_dbg_bp_fops);
+
 }
 
 void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
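
For orientation, the new bp_stats file would render roughly like the following on a two-channel interface (illustrative output only; the interface name, BPID and buffer counts below are invented):

	Buffer pool info for eth0:
	IDX       BPID      Buf count      CH#0      CH#1
	BP#0        32           1264         x         x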

drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h

@@ -73,6 +73,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
 	TP_ARGS(netdev, fd)
 );
 
+/* Tx (egress) XSK fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_xsk_fd,
+	     TP_PROTO(struct net_device *netdev,
+		      const struct dpaa2_fd *fd),
+
+	     TP_ARGS(netdev, fd)
+);
+
 /* Rx fd */
 DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
 	     TP_PROTO(struct net_device *netdev,
@@ -81,6 +89,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
 	TP_ARGS(netdev, fd)
 );
 
+/* Rx XSK fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_xsk_fd,
+	     TP_PROTO(struct net_device *netdev,
+		      const struct dpaa2_fd *fd),
+
+	     TP_ARGS(netdev, fd)
+);
+
 /* Tx confirmation fd */
 DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
 	     TP_PROTO(struct net_device *netdev,
@@ -90,57 +106,81 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
 );
 
 /* Log data about raw buffers. Useful for tracing DPBP content. */
-TRACE_EVENT(dpaa2_eth_buf_seed,
-	    /* Trace function prototype */
-	    TP_PROTO(struct net_device *netdev,
-		     /* virtual address and size */
-		     void *vaddr,
-		     size_t size,
-		     /* dma map address and size */
-		     dma_addr_t dma_addr,
-		     size_t map_size,
-		     /* buffer pool id, if relevant */
-		     u16 bpid),
+DECLARE_EVENT_CLASS(dpaa2_eth_buf,
+		    /* Trace function prototype */
+		    TP_PROTO(struct net_device *netdev,
+			     /* virtual address and size */
+			     void *vaddr,
+			     size_t size,
+			     /* dma map address and size */
+			     dma_addr_t dma_addr,
+			     size_t map_size,
+			     /* buffer pool id, if relevant */
+			     u16 bpid),
 
-	    /* Repeat argument list here */
-	    TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
+		    /* Repeat argument list here */
+		    TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
 
-	    /* A structure containing the relevant information we want
-	     * to record. Declare name and type for each normal element,
-	     * name, type and size for arrays. Use __string for variable
-	     * length strings.
-	     */
-	    TP_STRUCT__entry(
-			     __field(void *, vaddr)
-			     __field(size_t, size)
-			     __field(dma_addr_t, dma_addr)
-			     __field(size_t, map_size)
-			     __field(u16, bpid)
-			     __string(name, netdev->name)
-	    ),
+		    /* A structure containing the relevant information we want
+		     * to record. Declare name and type for each normal element,
+		     * name, type and size for arrays. Use __string for variable
+		     * length strings.
+		     */
+		    TP_STRUCT__entry(
+				     __field(void *, vaddr)
+				     __field(size_t, size)
+				     __field(dma_addr_t, dma_addr)
+				     __field(size_t, map_size)
+				     __field(u16, bpid)
+				     __string(name, netdev->name)
+		    ),
 
-	    /* The function that assigns values to the above declared
-	     * fields
-	     */
-	    TP_fast_assign(
-			   __entry->vaddr = vaddr;
-			   __entry->size = size;
-			   __entry->dma_addr = dma_addr;
-			   __entry->map_size = map_size;
-			   __entry->bpid = bpid;
-			   __assign_str(name, netdev->name);
-	    ),
+		    /* The function that assigns values to the above declared
+		     * fields
+		     */
+		    TP_fast_assign(
+				   __entry->vaddr = vaddr;
+				   __entry->size = size;
+				   __entry->dma_addr = dma_addr;
+				   __entry->map_size = map_size;
+				   __entry->bpid = bpid;
+				   __assign_str(name, netdev->name);
+		    ),
 
-	    /* This is what gets printed when the trace event is
-	     * triggered.
-	     */
-	    TP_printk(TR_BUF_FMT,
-		      __get_str(name),
-		      __entry->vaddr,
-		      __entry->size,
-		      &__entry->dma_addr,
-		      __entry->map_size,
-		      __entry->bpid)
+		    /* This is what gets printed when the trace event is
+		     * triggered.
+		     */
+		    TP_printk(TR_BUF_FMT,
+			      __get_str(name),
+			      __entry->vaddr,
+			      __entry->size,
+			      &__entry->dma_addr,
+			      __entry->map_size,
+			      __entry->bpid)
 );
 
+/* Main memory buff seeding */
+DEFINE_EVENT(dpaa2_eth_buf, dpaa2_eth_buf_seed,
+	     TP_PROTO(struct net_device *netdev,
+		      void *vaddr,
+		      size_t size,
+		      dma_addr_t dma_addr,
+		      size_t map_size,
+		      u16 bpid),
+
+	     TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
+);
+
+/* UMEM buff seeding on AF_XDP fast path */
+DEFINE_EVENT(dpaa2_eth_buf, dpaa2_xsk_buf_seed,
+	     TP_PROTO(struct net_device *netdev,
+		      void *vaddr,
+		      size_t size,
+		      dma_addr_t dma_addr,
+		      size_t map_size,
+		      u16 bpid),
+
+	     TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
+);
+
 /* If only one event of a certain type needs to be declared, use TRACE_EVENT().
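
The two DEFINE_EVENT instances added above reuse the dpaa2_eth_buf class, so they can be toggled individually at runtime. Assuming tracefs is mounted at /sys/kernel/tracing and going by this header's TRACE_SYSTEM name (dpaa2_eth), enabling them would look something like:

	echo 1 > /sys/kernel/tracing/events/dpaa2_eth/dpaa2_xsk_buf_seed/enable
	echo 1 > /sys/kernel/tracing/events/dpaa2_eth/dpaa2_rx_xsk_fd/enable
	cat /sys/kernel/tracing/trace_pipe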

drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2022 NXP
  */
 #include <linux/init.h>
 #include <linux/module.h>
@@ -19,6 +19,7 @@
 #include <net/pkt_cls.h>
 #include <net/sock.h>
 #include <net/tso.h>
+#include <net/xdp_sock_drv.h>
 
 #include "dpaa2-eth.h"
 
@@ -104,8 +105,8 @@ static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
 	priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
 }
 
-static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
-				dma_addr_t iova_addr)
+void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+			 dma_addr_t iova_addr)
 {
 	phys_addr_t phys_addr;
 
@@ -279,23 +280,33 @@ static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
  * be released in the pool
  */
 static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
-				int count)
+				int count, bool xsk_zc)
 {
 	struct device *dev = priv->net_dev->dev.parent;
+	struct dpaa2_eth_swa *swa;
+	struct xdp_buff *xdp_buff;
 	void *vaddr;
 	int i;
 
 	for (i = 0; i < count; i++) {
 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
-		dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
-			       DMA_BIDIRECTIONAL);
-		free_pages((unsigned long)vaddr, 0);
+
+		if (!xsk_zc) {
+			dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
+				       DMA_BIDIRECTIONAL);
+			free_pages((unsigned long)vaddr, 0);
+		} else {
+			swa = (struct dpaa2_eth_swa *)
+				(vaddr + DPAA2_ETH_RX_HWA_SIZE);
+			xdp_buff = swa->xsk.xdp_buff;
+			xsk_buff_free(xdp_buff);
+		}
 	}
 }
 
-static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
-				  struct dpaa2_eth_channel *ch,
-				  dma_addr_t addr)
+void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   dma_addr_t addr)
 {
 	int retries = 0;
 	int err;
@@ -304,7 +315,7 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
 	if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
 		return;
 
-	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+	while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
 					       ch->recycled_bufs,
 					       ch->recycled_bufs_cnt)) == -EBUSY) {
 		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
@@ -313,7 +324,8 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
 	}
 
 	if (err) {
-		dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
+		dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
				    ch->recycled_bufs_cnt, ch->xsk_zc);
 		ch->buf_count -= ch->recycled_bufs_cnt;
 	}
 
@@ -377,10 +389,10 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
 	fq->xdp_tx_fds.num = 0;
 }
 
-static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
-				  struct dpaa2_eth_channel *ch,
-				  struct dpaa2_fd *fd,
-				  void *buf_start, u16 queue_id)
+void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   struct dpaa2_fd *fd,
+			   void *buf_start, u16 queue_id)
 {
 	struct dpaa2_faead *faead;
 	struct dpaa2_fd *dest_fd;
@@ -485,19 +497,15 @@ out:
 	return xdp_act;
 }
 
-static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
-					   const struct dpaa2_fd *fd,
-					   void *fd_vaddr)
+struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+				    struct dpaa2_eth_channel *ch,
+				    const struct dpaa2_fd *fd, u32 fd_length,
+				    void *fd_vaddr)
 {
 	u16 fd_offset = dpaa2_fd_get_offset(fd);
-	struct dpaa2_eth_priv *priv = ch->priv;
-	u32 fd_length = dpaa2_fd_get_len(fd);
 	struct sk_buff *skb = NULL;
 	unsigned int skb_len;
 
-	if (fd_length > priv->rx_copybreak)
-		return NULL;
-
 	skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
 
 	skb = napi_alloc_skb(&ch->napi, skb_len);
@@ -514,11 +522,66 @@ static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
 	return skb;
 }
 
+static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
+					   const struct dpaa2_fd *fd,
+					   void *fd_vaddr)
+{
+	struct dpaa2_eth_priv *priv = ch->priv;
+	u32 fd_length = dpaa2_fd_get_len(fd);
+
+	if (fd_length > priv->rx_copybreak)
+		return NULL;
+
+	return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
+}
+
+void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   const struct dpaa2_fd *fd, void *vaddr,
+			   struct dpaa2_eth_fq *fq,
+			   struct rtnl_link_stats64 *percpu_stats,
+			   struct sk_buff *skb)
+{
+	struct dpaa2_fas *fas;
+	u32 status = 0;
+
+	fas = dpaa2_get_fas(vaddr, false);
+	prefetch(fas);
+	prefetch(skb->data);
+
+	/* Get the timestamp value */
+	if (priv->rx_tstamp) {
+		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+		__le64 *ts = dpaa2_get_ts(vaddr, false);
+		u64 ns;
+
+		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+		shhwtstamps->hwtstamp = ns_to_ktime(ns);
+	}
+
+	/* Check if we need to validate the L4 csum */
+	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+		status = le32_to_cpu(fas->status);
+		dpaa2_eth_validate_rx_csum(priv, status, skb);
+	}
+
+	skb->protocol = eth_type_trans(skb, priv->net_dev);
+	skb_record_rx_queue(skb, fq->flowid);
+
+	percpu_stats->rx_packets++;
+	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+	ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
+
+	list_add_tail(&skb->list, ch->rx_list);
+}
+
 /* Main Rx frame processing routine */
-static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
-			 struct dpaa2_eth_channel *ch,
-			 const struct dpaa2_fd *fd,
-			 struct dpaa2_eth_fq *fq)
+void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+		  struct dpaa2_eth_channel *ch,
+		  const struct dpaa2_fd *fd,
+		  struct dpaa2_eth_fq *fq)
 {
 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
 	u8 fd_format = dpaa2_fd_get_format(fd);
@@ -527,9 +590,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	struct rtnl_link_stats64 *percpu_stats;
 	struct dpaa2_eth_drv_stats *percpu_extras;
 	struct device *dev = priv->net_dev->dev.parent;
-	struct dpaa2_fas *fas;
 	void *buf_data;
-	u32 status = 0;
 	u32 xdp_act;
 
 	/* Tracing point */
@@ -539,8 +600,6 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
 				DMA_BIDIRECTIONAL);
 
-	fas = dpaa2_get_fas(vaddr, false);
-	prefetch(fas);
 	buf_data = vaddr + dpaa2_fd_get_offset(fd);
 	prefetch(buf_data);
 
@@ -578,35 +637,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	if (unlikely(!skb))
 		goto err_build_skb;
 
-	prefetch(skb->data);
-
-	/* Get the timestamp value */
-	if (priv->rx_tstamp) {
-		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
-		__le64 *ts = dpaa2_get_ts(vaddr, false);
-		u64 ns;
-
-		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-
-		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
-		shhwtstamps->hwtstamp = ns_to_ktime(ns);
-	}
-
-	/* Check if we need to validate the L4 csum */
-	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
-		status = le32_to_cpu(fas->status);
-		dpaa2_eth_validate_rx_csum(priv, status, skb);
-	}
-
-	skb->protocol = eth_type_trans(skb, priv->net_dev);
-	skb_record_rx_queue(skb, fq->flowid);
-
-	percpu_stats->rx_packets++;
-	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
-	ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
-
-	list_add_tail(&skb->list, ch->rx_list);
-
+	dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
 	return;
 
 err_build_skb:
@@ -827,7 +858,7 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
 	}
 }
 
-static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
 {
 	struct dpaa2_eth_sgt_cache *sgt_cache;
 	void *sgt_buf = NULL;
@@ -849,7 +880,7 @@ static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
 	return sgt_buf;
 }
 
-static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
 {
 	struct dpaa2_eth_sgt_cache *sgt_cache;
 
@@ -1084,9 +1115,10 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
-static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
-				 struct dpaa2_eth_fq *fq,
-				 const struct dpaa2_fd *fd, bool in_napi)
+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+			  struct dpaa2_eth_channel *ch,
+			  struct dpaa2_eth_fq *fq,
+			  const struct dpaa2_fd *fd, bool in_napi)
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	dma_addr_t fd_addr, sg_addr;
@@ -1153,6 +1185,10 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
 
 			if (!swa->tso.is_last_fd)
 				should_free_skb = 0;
+		} else if (swa->type == DPAA2_ETH_SWA_XSK) {
+			/* Unmap the SGT Buffer */
+			dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
+					 DMA_BIDIRECTIONAL);
 		} else {
 			skb = swa->single.skb;
 
@@ -1170,6 +1206,12 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
 		return;
 	}
 
+	if (swa->type == DPAA2_ETH_SWA_XSK) {
+		ch->xsk_tx_pkts_sent++;
+		dpaa2_eth_sgt_recycle(priv, buffer_start);
+		return;
+	}
+
 	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
 		fq->dq_frames++;
 		fq->dq_bytes += fd_len;
@@ -1344,7 +1386,7 @@ err_alloc_tso_hdr:
 err_sgt_get:
 	/* Free all the other FDs that were already fully created */
 	for (i = 0; i < index; i++)
-		dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false);
+		dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);
 
 	return err;
 }
@@ -1460,7 +1502,7 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
 	if (unlikely(err < 0)) {
 		percpu_stats->tx_errors++;
 		/* Clean up everything, including freeing the skb */
-		dpaa2_eth_free_tx_fd(priv, fq, fd, false);
+		dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false);
 		netdev_tx_completed_queue(nq, 1, fd_len);
 	} else {
 		percpu_stats->tx_packets += total_enqueued;
@@ -1553,7 +1595,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
 
 	/* Check frame errors in the FD field */
 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
-	dpaa2_eth_free_tx_fd(priv, fq, fd, true);
+	dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);
 
 	if (likely(!fd_errors))
 		return;
@@ -1631,44 +1673,76 @@ static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
 * to the specified buffer pool
 */
 static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
-			      struct dpaa2_eth_channel *ch, u16 bpid)
+			      struct dpaa2_eth_channel *ch)
 {
+	struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD];
 	struct device *dev = priv->net_dev->dev.parent;
 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+	struct dpaa2_eth_swa *swa;
 	struct page *page;
 	dma_addr_t addr;
 	int retries = 0;
-	int i, err;
+	int i = 0, err;
+	u32 batch;
 
-	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
-		/* Allocate buffer visible to WRIOP + skb shared info +
-		 * alignment padding
-		/* allocate one page for each Rx buffer. WRIOP sees
-		 * the entire page except for a tailroom reserved for
-		 * skb shared info
-		 */
-		page = dev_alloc_pages(0);
-		if (!page)
-			goto err_alloc;
-
-		addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
-				    DMA_BIDIRECTIONAL);
-		if (unlikely(dma_mapping_error(dev, addr)))
-			goto err_map;
-
-		buf_array[i] = addr;
-
-		/* tracing point */
-		trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
-					 DPAA2_ETH_RX_BUF_RAW_SIZE,
-					 addr, priv->rx_buf_size,
-					 bpid);
-	}
+	/* Allocate buffers visible to WRIOP */
+	if (!ch->xsk_zc) {
+		for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+			/* Also allocate skb shared info and alignment padding.
+			 * There is one page for each Rx buffer. WRIOP sees
+			 * the entire page except for a tailroom reserved for
+			 * skb shared info
+			 */
+			page = dev_alloc_pages(0);
+			if (!page)
+				goto err_alloc;
+
+			addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
+					    DMA_BIDIRECTIONAL);
+			if (unlikely(dma_mapping_error(dev, addr)))
+				goto err_map;
+
+			buf_array[i] = addr;
+
+			/* tracing point */
+			trace_dpaa2_eth_buf_seed(priv->net_dev,
+						 page_address(page),
+						 DPAA2_ETH_RX_BUF_RAW_SIZE,
+						 addr, priv->rx_buf_size,
+						 ch->bp->bpid);
+		}
+	} else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
+		/* Allocate XSK buffers for AF_XDP fast path in batches
+		 * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot
+		 * provide enough buffers at the moment
+		 */
+		batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
+					     DPAA2_ETH_BUFS_PER_CMD);
+		if (!batch)
+			goto err_alloc;
+
+		for (i = 0; i < batch; i++) {
+			swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start +
+						       DPAA2_ETH_RX_HWA_SIZE);
+			swa->xsk.xdp_buff = xdp_buffs[i];
+
+			addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]);
+			if (unlikely(dma_mapping_error(dev, addr)))
+				goto err_map;
+
+			buf_array[i] = addr;
+
+			trace_dpaa2_xsk_buf_seed(priv->net_dev,
						 xdp_buffs[i]->data_hard_start,
+						 DPAA2_ETH_RX_BUF_RAW_SIZE,
+						 addr, priv->rx_buf_size,
+						 ch->bp->bpid);
+		}
+	}
 
 release_bufs:
 	/* In case the portal is busy, retry until successful */
-	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+	while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
 					       buf_array, i)) == -EBUSY) {
 		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
 			break;
@@ -1679,14 +1753,19 @@ release_bufs:
 	 * not much else we can do about it
 	 */
 	if (err) {
-		dpaa2_eth_free_bufs(priv, buf_array, i);
+		dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
 		return 0;
 	}
 
 	return i;
 
 err_map:
-	__free_pages(page, 0);
+	if (!ch->xsk_zc) {
+		__free_pages(page, 0);
+	} else {
+		for (; i < batch; i++)
+			xsk_buff_free(xdp_buffs[i]);
+	}
 err_alloc:
 	/* If we managed to allocate at least some buffers,
 	 * release them to hardware
@@ -1697,39 +1776,64 @@ err_alloc:
 	return 0;
 }
 
-static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
+			       struct dpaa2_eth_channel *ch)
 {
-	int i, j;
+	int i;
 	int new_count;
 
-	for (j = 0; j < priv->num_channels; j++) {
-		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
-		     i += DPAA2_ETH_BUFS_PER_CMD) {
-			new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
-			priv->channel[j]->buf_count += new_count;
+	for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
+		new_count = dpaa2_eth_add_bufs(priv, ch);
+		ch->buf_count += new_count;
 
-			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
-				return -ENOMEM;
-			}
-		}
+		if (new_count < DPAA2_ETH_BUFS_PER_CMD)
+			return -ENOMEM;
 	}
 
 	return 0;
 }
 
+static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
+{
+	struct net_device *net_dev = priv->net_dev;
+	struct dpaa2_eth_channel *channel;
+	int i, err = 0;
+
+	for (i = 0; i < priv->num_channels; i++) {
+		channel = priv->channel[i];
+
+		err = dpaa2_eth_seed_pool(priv, channel);
+
+		/* Not much to do; the buffer pool, though not filled up,
+		 * may still contain some buffers which would enable us
+		 * to limp on.
+		 */
+		if (err)
+			netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+				   channel->bp->dev->obj_desc.id,
+				   channel->bp->bpid);
+	}
+}
+
 /*
- * Drain the specified number of buffers from the DPNI's private buffer pool.
+ * Drain the specified number of buffers from one of the DPNI's private buffer
+ * pools.
 * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD
 */
-static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
+static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
+				 int count)
 {
 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+	bool xsk_zc = false;
 	int retries = 0;
-	int ret;
+	int i, ret;
+
+	for (i = 0; i < priv->num_channels; i++)
+		if (priv->channel[i]->bp->bpid == bpid)
+			xsk_zc = priv->channel[i]->xsk_zc;
 
 	do {
-		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
-					       buf_array, count);
+		ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
 		if (ret < 0) {
 			if (ret == -EBUSY &&
 			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
@@ -1737,28 +1841,40 @@ static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
 			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
 			return;
 		}
-		dpaa2_eth_free_bufs(priv, buf_array, ret);
+		dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc);
 		retries = 0;
 	} while (ret);
 }
 
-static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
 {
 	int i;
 
-	dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
-	dpaa2_eth_drain_bufs(priv, 1);
+	/* Drain the buffer pool */
+	dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
+	dpaa2_eth_drain_bufs(priv, bpid, 1);
 
+	/* Setup to zero the buffer count of all channels which were
+	 * using this buffer pool.
+	 */
 	for (i = 0; i < priv->num_channels; i++)
-		priv->channel[i]->buf_count = 0;
+		if (priv->channel[i]->bp->bpid == bpid)
+			priv->channel[i]->buf_count = 0;
 }
 
+static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_bps; i++)
+		dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
+}
+
 /* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
 static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
-				 struct dpaa2_eth_channel *ch,
-				 u16 bpid)
+				 struct dpaa2_eth_channel *ch)
 {
 	int new_count;
 
@@ -1766,7 +1882,7 @@ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
 		return 0;
 
 	do {
-		new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
+		new_count = dpaa2_eth_add_bufs(priv, ch);
 		if (unlikely(!new_count)) {
 			/* Out of memory; abort for now, we'll try later on */
 			break;
@@ -1830,6 +1946,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
 	struct netdev_queue *nq;
 	int store_cleaned, work_done;
+	bool work_done_zc = false;
 	struct list_head rx_list;
 	int retries = 0;
 	u16 flowid;
@@ -1842,13 +1959,22 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 	INIT_LIST_HEAD(&rx_list);
 	ch->rx_list = &rx_list;
 
+	if (ch->xsk_zc) {
+		work_done_zc = dpaa2_xsk_tx(priv, ch);
+		/* If we reached the XSK Tx per NAPI threshold, we're done */
+		if (work_done_zc) {
+			work_done = budget;
+			goto out;
+		}
+	}
+
 	do {
 		err = dpaa2_eth_pull_channel(ch);
 		if (unlikely(err))
 			break;
 
 		/* Refill pool if appropriate */
-		dpaa2_eth_refill_pool(priv, ch, priv->bpid);
+		dpaa2_eth_refill_pool(priv, ch);
 
 		store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
 		if (store_cleaned <= 0)
@@ -1894,6 +2020,11 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 out:
 	netif_receive_skb_list(ch->rx_list);
 
+	if (ch->xsk_tx_pkts_sent) {
+		xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
+		ch->xsk_tx_pkts_sent = 0;
+	}
+
 	if (txc_fq && txc_fq->dq_frames) {
 		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
 		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
@@ -2047,15 +2178,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 	int err;
 
-	err = dpaa2_eth_seed_pool(priv, priv->bpid);
-	if (err) {
-		/* Not much to do; the buffer pool, though not filled up,
-		 * may still contain some buffers which would enable us
-		 * to limp on.
-		 */
-		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
-			   priv->dpbp_dev->obj_desc.id, priv->bpid);
-	}
+	dpaa2_eth_seed_pools(priv);
 
 	if (!dpaa2_eth_is_type_phy(priv)) {
 		/* We'll only start the txqs when the link is actually ready;
@@ -2088,7 +2211,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
 
 enable_err:
 	dpaa2_eth_disable_ch_napi(priv);
-	dpaa2_eth_drain_pool(priv);
+	dpaa2_eth_drain_pools(priv);
 	return err;
 }
 
@@ -2193,7 +2316,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
 	dpaa2_eth_disable_ch_napi(priv);
 
 	/* Empty the buffer pool */
-	dpaa2_eth_drain_pool(priv);
+	dpaa2_eth_drain_pools(priv);
 
 	/* Empty the Scatter-Gather Buffer cache */
 	dpaa2_eth_sgt_cache_drain(priv);
@@ -2602,7 +2725,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
 	need_update = (!!priv->xdp_prog != !!prog);
 
 	if (up)
-		dpaa2_eth_stop(dev);
+		dev_close(dev);
 
 	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
@@ -2630,7 +2753,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
 	}
 
 	if (up) {
-		err = dpaa2_eth_open(dev);
+		err = dev_open(dev, NULL);
 		if (err)
 			return err;
 	}
@@ -2641,7 +2764,7 @@ out_err:
 	if (prog)
 		bpf_prog_sub(prog, priv->num_channels);
 	if (up)
-		dpaa2_eth_open(dev);
+		dev_open(dev, NULL);
 
 	return err;
 }
@@ -2651,6 +2774,8 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		return dpaa2_eth_setup_xdp(dev, xdp->prog);
+	case XDP_SETUP_XSK_POOL:
+		return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id);
 	default:
 		return -EINVAL;
 	}
@@ -2881,6 +3006,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
 	.ndo_change_mtu = dpaa2_eth_change_mtu,
 	.ndo_bpf = dpaa2_eth_xdp,
 	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+	.ndo_xsk_wakeup = dpaa2_xsk_wakeup,
 	.ndo_setup_tc = dpaa2_eth_setup_tc,
 	.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
@@ -2895,7 +3021,11 @@ static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
 	/* Update NAPI statistics */
 	ch->stats.cdan++;
 
-	napi_schedule(&ch->napi);
+	/* NAPI can also be scheduled from the AF_XDP Tx path. Mark a missed
+	 * so that it can be rescheduled again.
+	 */
+	if (!napi_if_scheduled_mark_missed(&ch->napi))
+		napi_schedule(&ch->napi);
 }
 
 /* Allocate and configure a DPCON object */
@@ -3204,13 +3334,14 @@ static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
 	dpaa2_eth_set_fq_affinity(priv);
 }
 
-/* Allocate and configure one buffer pool for each interface */
-static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
+/* Allocate and configure a buffer pool */
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
 {
-	int err;
-	struct fsl_mc_device *dpbp_dev;
 	struct device *dev = priv->net_dev->dev.parent;
+	struct fsl_mc_device *dpbp_dev;
 	struct dpbp_attr dpbp_attrs;
+	struct dpaa2_eth_bp *bp;
+	int err;
 
 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
@@ -3219,12 +3350,16 @@ struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
 			err = -EPROBE_DEFER;
 		else
 			dev_err(dev, "DPBP device allocation failed\n");
-		return err;
+		return ERR_PTR(err);
 	}
 
-	priv->dpbp_dev = dpbp_dev;
+	bp = kzalloc(sizeof(*bp), GFP_KERNEL);
+	if (!bp) {
+		err = -ENOMEM;
+		goto err_alloc;
+	}
 
-	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
+	err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id,
 			&dpbp_dev->mc_handle);
 	if (err) {
 		dev_err(dev, "dpbp_open() failed\n");
@@ -3249,9 +3384,11 @@ struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
 		dev_err(dev, "dpbp_get_attributes() failed\n");
 		goto err_get_attr;
 	}
-	priv->bpid = dpbp_attrs.bpid;
 
-	return 0;
+	bp->dev = dpbp_dev;
+	bp->bpid = dpbp_attrs.bpid;
+
+	return bp;
 
 err_get_attr:
 	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
@@ -3259,17 +3396,58 @@ err_enable:
 err_reset:
 	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
 err_open:
+	kfree(bp);
+err_alloc:
 	fsl_mc_object_free(dpbp_dev);
 
-	return err;
+	return ERR_PTR(err);
 }
 
-static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
 {
-	dpaa2_eth_drain_pool(priv);
-	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
-	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
-	fsl_mc_object_free(priv->dpbp_dev);
+	struct dpaa2_eth_bp *bp;
+	int i;
+
+	bp = dpaa2_eth_allocate_dpbp(priv);
+	if (IS_ERR(bp))
+		return PTR_ERR(bp);
+
+	priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp;
+	priv->num_bps++;
+
+	for (i = 0; i < priv->num_channels; i++)
+		priv->channel[i]->bp = bp;
+
+	return 0;
+}
+
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
+{
+	int idx_bp;
+
+	/* Find the index at which this BP is stored */
+	for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++)
+		if (priv->bp[idx_bp] == bp)
+			break;
+
+	/* Drain the pool and disable the associated MC object */
+	dpaa2_eth_drain_pool(priv, bp->bpid);
+	dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
+	dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
+	fsl_mc_object_free(bp->dev);
+	kfree(bp);
+
+	/* Move the last in use DPBP over in this position */
+	priv->bp[idx_bp] = priv->bp[priv->num_bps - 1];
+	priv->num_bps--;
+}
+
+static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_bps; i++)
+		dpaa2_eth_free_dpbp(priv, priv->bp[i]);
 }
 
 static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
@@ -4154,15 +4332,16 @@ out:
 */
 static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
 {
+	struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
 	struct net_device *net_dev = priv->net_dev;
+	struct dpni_pools_cfg pools_params = { 0 };
 	struct device *dev = net_dev->dev.parent;
-	struct dpni_pools_cfg pools_params;
 	struct dpni_error_cfg err_cfg;
 	int err = 0;
 	int i;
 
 	pools_params.num_dpbp = 1;
-	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
+	pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
 	pools_params.pools[0].backup_pool = 0;
 	pools_params.pools[0].buffer_size = priv->rx_buf_size;
 	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
@@ -4641,7 +4820,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
 
 	dpaa2_eth_setup_fqs(priv);
 
-	err = dpaa2_eth_setup_dpbp(priv);
+	err = dpaa2_eth_setup_default_dpbp(priv);
 	if (err)
 		goto err_dpbp_setup;
 
@@ -4777,7 +4956,7 @@ err_alloc_percpu_extras:
 err_alloc_percpu_stats:
 	dpaa2_eth_del_ch_napi(priv);
 err_bind:
-	dpaa2_eth_free_dpbp(priv);
+	dpaa2_eth_free_dpbps(priv);
 err_dpbp_setup:
 	dpaa2_eth_free_dpio(priv);
 err_dpio_setup:
@@ -4830,7 +5009,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
 	free_percpu(priv->percpu_extras);
 
 	dpaa2_eth_del_ch_napi(priv);
-	dpaa2_eth_free_dpbp(priv);
+	dpaa2_eth_free_dpbps(priv);
 	dpaa2_eth_free_dpio(priv);
 	dpaa2_eth_free_dpni(priv);
 	if (priv->onestep_reg_base)

drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
 /* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2022 NXP
 */
 
 #ifndef __DPAA2_ETH_H
@@ -53,6 +53,12 @@
 */
 #define DPAA2_ETH_TXCONF_PER_NAPI	256
 
+/* Maximum number of Tx frames to be processed in a single NAPI
+ * call when AF_XDP is running. Bind it to DPAA2_ETH_TXCONF_PER_NAPI
+ * to maximize the throughput.
+ */
+#define DPAA2_ETH_TX_ZC_PER_NAPI	DPAA2_ETH_TXCONF_PER_NAPI
+
 /* Buffer qouta per channel. We want to keep in check number of ingress frames
 * in flight: for small sized frames, congestion group taildrop may kick in
 * first; for large sizes, Rx FQ taildrop threshold will ensure only a
@@ -109,6 +115,14 @@
 #define DPAA2_ETH_RX_BUF_ALIGN_REV1	256
 #define DPAA2_ETH_RX_BUF_ALIGN		64
 
+/* The firmware allows assigning multiple buffer pools to a single DPNI -
+ * maximum 8 DPBP objects. By default, only the first DPBP (idx 0) is used for
+ * all queues. Thus, when enabling AF_XDP we must accommodate up to 9 DPBPs
+ * object: the default and 8 other distinct buffer pools, one for each queue.
+ */
+#define DPAA2_ETH_DEFAULT_BP_IDX	0
+#define DPAA2_ETH_MAX_BPS		9
+
 /* We are accommodating a skb backpointer and some S/G info
 * in the frame's software annotation. The hardware
 * options are either 0 or 64, so we choose the latter.
@@ -122,6 +136,7 @@ enum dpaa2_eth_swa_type {
 	DPAA2_ETH_SWA_SINGLE,
 	DPAA2_ETH_SWA_SG,
 	DPAA2_ETH_SWA_XDP,
+	DPAA2_ETH_SWA_XSK,
 	DPAA2_ETH_SWA_SW_TSO,
 };
 
@@ -143,6 +158,10 @@ struct dpaa2_eth_swa {
 			int dma_size;
 			struct xdp_frame *xdpf;
 		} xdp;
+		struct {
+			struct xdp_buff *xdp_buff;
+			int sgt_size;
+		} xsk;
 		struct {
 			struct sk_buff *skb;
 			int num_sg;
@@ -421,12 +440,19 @@ enum dpaa2_eth_fq_type {
 };
 
 struct dpaa2_eth_priv;
+struct dpaa2_eth_channel;
+struct dpaa2_eth_fq;
 
 struct dpaa2_eth_xdp_fds {
 	struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
 	ssize_t num;
 };
 
+typedef void dpaa2_eth_consume_cb_t(struct dpaa2_eth_priv *priv,
+				    struct dpaa2_eth_channel *ch,
+				    const struct dpaa2_fd *fd,
+				    struct dpaa2_eth_fq *fq);
+
 struct dpaa2_eth_fq {
 	u32 fqid;
 	u32 tx_qdbin;
@@ -439,10 +465,7 @@ struct dpaa2_eth_fq {
 	struct dpaa2_eth_channel *channel;
 	enum dpaa2_eth_fq_type type;
 
-	void (*consume)(struct dpaa2_eth_priv *priv,
-			struct dpaa2_eth_channel *ch,
-			const struct dpaa2_fd *fd,
-			struct dpaa2_eth_fq *fq);
+	dpaa2_eth_consume_cb_t *consume;
 	struct dpaa2_eth_fq_stats stats;
 
 	struct dpaa2_eth_xdp_fds xdp_redirect_fds;
@@ -454,6 +477,11 @@ struct dpaa2_eth_ch_xdp {
 	unsigned int res;
 };
 
+struct dpaa2_eth_bp {
+	struct fsl_mc_device *dev;
+	int bpid;
+};
+
 struct dpaa2_eth_channel {
 	struct dpaa2_io_notification_ctx nctx;
 	struct fsl_mc_device *dpcon;
@@ -472,6 +500,11 @@ struct dpaa2_eth_channel {
 	/* Buffers to be recycled back in the buffer pool */
 	u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
 	int recycled_bufs_cnt;
+
+	bool xsk_zc;
+	int xsk_tx_pkts_sent;
+	struct xsk_buff_pool *xsk_pool;
+	struct dpaa2_eth_bp *bp;
 };
 
 struct dpaa2_eth_dist_fields {
@@ -506,7 +539,7 @@ struct dpaa2_eth_trap_data {
 
 #define DPAA2_ETH_DEFAULT_COPYBREAK	512
 
-#define DPAA2_ETH_ENQUEUE_MAX_FDS	200
+#define DPAA2_ETH_ENQUEUE_MAX_FDS	256
 struct dpaa2_eth_fds {
 	struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
 };
@@ -535,14 +568,16 @@ struct dpaa2_eth_priv {
 	u8 ptp_correction_off;
 	void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
 					    u32 offset, u8 udp);
-	struct fsl_mc_device *dpbp_dev;
 	u16 rx_buf_size;
-	u16 bpid;
 	struct iommu_domain *iommu_domain;
 
 	enum hwtstamp_tx_types tx_tstamp_type;	/* Tx timestamping type */
 	bool rx_tstamp;				/* Rx timestamping enabled */
 
+	/* Buffer pool management */
+	struct dpaa2_eth_bp *bp[DPAA2_ETH_MAX_BPS];
+	int num_bps;
+
 	u16 tx_qdid;
 	struct fsl_mc_io *mc_io;
 	/* Cores which have an affine DPIO/DPCON.
@@ -771,4 +806,54 @@ void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);
 
 struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
						  struct dpaa2_fapr *fapr);
 
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp);
+
+struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+				    struct dpaa2_eth_channel *ch,
+				    const struct dpaa2_fd *fd, u32 fd_length,
+				    void *fd_vaddr);
+
+void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   const struct dpaa2_fd *fd, void *vaddr,
+			   struct dpaa2_eth_fq *fq,
+			   struct rtnl_link_stats64 *percpu_stats,
+			   struct sk_buff *skb);
+
+void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+		  struct dpaa2_eth_channel *ch,
+		  const struct dpaa2_fd *fd,
+		  struct dpaa2_eth_fq *fq);
+
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_bp *bp);
+
+void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr);
+void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   dma_addr_t addr);
+
+void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   struct dpaa2_fd *fd,
+			   void *buf_start, u16 queue_id);
+
+int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
+
+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+			  struct dpaa2_eth_channel *ch,
+			  struct dpaa2_eth_fq *fq,
+			  const struct dpaa2_fd *fd, bool in_napi);
+bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
+		  struct dpaa2_eth_channel *ch);
+
+/* SGT (Scatter-Gather Table) cache management */
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf);
+
 #endif /* __DPAA2_H */

drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c

@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- * Copyright 2020 NXP
+ * Copyright 2016-2022 NXP
 */
 
 #include <linux/net_tstamp.h>
@@ -227,17 +226,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
					struct ethtool_stats *stats,
					u64 *data)
 {
-	int i = 0;
-	int j, k, err;
-	int num_cnt;
-	union dpni_statistics dpni_stats;
-	u32 fcnt, bcnt;
-	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
-	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
-	u32 buf_cnt;
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-	struct dpaa2_eth_drv_stats *extras;
-	struct dpaa2_eth_ch_stats *ch_stats;
+	union dpni_statistics dpni_stats;
 	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
		sizeof(dpni_stats.page_0),
		sizeof(dpni_stats.page_1),
@@ -247,6 +237,13 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
		sizeof(dpni_stats.page_5),
		sizeof(dpni_stats.page_6),
	};
+	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+	struct dpaa2_eth_ch_stats *ch_stats;
+	struct dpaa2_eth_drv_stats *extras;
+	u32 buf_cnt, buf_cnt_total = 0;
+	int j, k, err, num_cnt, i = 0;
+	u32 fcnt, bcnt;
 
	memset(data, 0,
	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
@@ -308,12 +305,15 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
	*(data + i++) = fcnt_tx_total;
	*(data + i++) = bcnt_tx_total;
 
-	err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
-	if (err) {
-		netdev_warn(net_dev, "Buffer count query error %d\n", err);
-		return;
+	for (j = 0; j < priv->num_bps; j++) {
+		err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt);
+		if (err) {
+			netdev_warn(net_dev, "Buffer count query error %d\n", err);
+			return;
+		}
+		buf_cnt_total += buf_cnt;
	}
-	*(data + i++) = buf_cnt;
+	*(data + i++) = buf_cnt_total;
 
	if (dpaa2_eth_has_mac(priv))
		dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
@@ -876,6 +876,29 @@ restore_rx_usecs:
	return err;
 }
 
+static void dpaa2_eth_get_channels(struct net_device *net_dev,
+				   struct ethtool_channels *channels)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	int queue_count = dpaa2_eth_queue_count(priv);
+
+	channels->max_rx = queue_count;
+	channels->max_tx = queue_count;
+	channels->rx_count = queue_count;
+	channels->tx_count = queue_count;
+
+	/* Tx confirmation and Rx error */
+	channels->max_other = queue_count + 1;
+	channels->max_combined = channels->max_rx +
+				 channels->max_tx +
+				 channels->max_other;
+	/* Tx conf and Rx err */
+	channels->other_count = queue_count + 1;
+	channels->combined_count = channels->rx_count +
+				   channels->tx_count +
+				   channels->other_count;
+}
+
 const struct ethtool_ops dpaa2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -896,4 +919,5 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
	.set_tunable = dpaa2_eth_set_tunable,
	.get_coalesce = dpaa2_eth_get_coalesce,
	.set_coalesce = dpaa2_eth_set_coalesce,
+	.get_channels = dpaa2_eth_get_channels,
 };
|
454
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
Normal file
454
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
Normal file
|
@ -0,0 +1,454 @@
|
|||
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
|
||||
/* Copyright 2022 NXP
|
||||
*/
|
||||
#include <linux/filter.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/bpf_trace.h>
|
||||
#include <net/xdp.h>
|
||||
#include <net/xdp_sock_drv.h>
|
||||
|
||||
#include "dpaa2-eth.h"
|
||||
|
||||
static void dpaa2_eth_setup_consume_func(struct dpaa2_eth_priv *priv,
|
||||
struct dpaa2_eth_channel *ch,
|
||||
enum dpaa2_eth_fq_type type,
|
||||
dpaa2_eth_consume_cb_t *consume)
|
||||
{
|
||||
struct dpaa2_eth_fq *fq;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < priv->num_fqs; i++) {
|
||||
fq = &priv->fq[i];
|
||||
|
||||
if (fq->type != type)
|
||||
continue;
|
||||
if (fq->channel != ch)
|
||||
continue;
|
||||
|
||||
fq->consume = consume;
|
||||
}
|
||||
}
|
||||
|
||||
static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv,
|
||||
struct dpaa2_eth_channel *ch,
|
||||
struct dpaa2_eth_fq *rx_fq,
|
||||
struct dpaa2_fd *fd, void *vaddr)
|
||||
{
|
||||
dma_addr_t addr = dpaa2_fd_get_addr(fd);
|
||||
struct bpf_prog *xdp_prog;
|
||||
struct xdp_buff *xdp_buff;
|
||||
struct dpaa2_eth_swa *swa;
|
||||
u32 xdp_act = XDP_PASS;
|
||||
int err;
|
||||
|
||||
xdp_prog = READ_ONCE(ch->xdp.prog);
|
||||
if (!xdp_prog)
|
||||
goto out;
|
||||
|
||||
swa = (struct dpaa2_eth_swa *)(vaddr + DPAA2_ETH_RX_HWA_SIZE +
|
||||
ch->xsk_pool->umem->headroom);
|
||||
xdp_buff = swa->xsk.xdp_buff;
|
||||
|
||||
xdp_buff->data_hard_start = vaddr;
|
||||
xdp_buff->data = vaddr + dpaa2_fd_get_offset(fd);
|
||||
xdp_buff->data_end = xdp_buff->data + dpaa2_fd_get_len(fd);
|
||||
xdp_set_data_meta_invalid(xdp_buff);
|
||||
xdp_buff->rxq = &ch->xdp_rxq;
|
||||
|
||||
xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool);
|
||||
xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff);
|
||||
|
||||
/* xdp.data pointer may have changed */
|
||||
dpaa2_fd_set_offset(fd, xdp_buff->data - vaddr);
|
||||
dpaa2_fd_set_len(fd, xdp_buff->data_end - xdp_buff->data);
|
||||
|
||||
if (likely(xdp_act == XDP_REDIRECT)) {
|
||||
err = xdp_do_redirect(priv->net_dev, xdp_buff, xdp_prog);
|
||||
if (unlikely(err)) {
|
||||
ch->stats.xdp_drop++;
|
||||
dpaa2_eth_recycle_buf(priv, ch, addr);
|
||||
} else {
|
||||
ch->buf_count--;
|
||||
ch->stats.xdp_redirect++;
|
||||
}
|
||||
|
||||
goto xdp_redir;
|
||||
}
|
||||
|
||||
switch (xdp_act) {
|
||||
case XDP_PASS:
|
||||
break;
|
||||
case XDP_TX:
|
||||
dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
|
||||
break;
|
||||
default:
|
||||
bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
|
||||
fallthrough;
|
||||
case XDP_ABORTED:
|
||||
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
|
||||
fallthrough;
|
||||
case XDP_DROP:
|
||||
dpaa2_eth_recycle_buf(priv, ch, addr);
|
||||
ch->stats.xdp_drop++;
|
||||
break;
|
||||
}
|
||||
|
||||
xdp_redir:
|
||||
ch->xdp.res |= xdp_act;
|
||||
out:
|
||||
return xdp_act;
|
||||
}

/* Rx frame processing routine for the AF_XDP fast path */
static void dpaa2_xsk_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct rtnl_link_stats64 *percpu_stats;
	u32 fd_length = dpaa2_fd_get_len(fd);
	struct sk_buff *skb;
	void *vaddr;
	u32 xdp_act;

	trace_dpaa2_rx_xsk_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	if (fd_format != dpaa2_fd_single) {
		WARN_ON(priv->xdp_prog);
		/* AF_XDP doesn't support any other formats */
		goto err_frame_format;
	}

	xdp_act = dpaa2_xsk_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
	if (xdp_act != XDP_PASS) {
		percpu_stats->rx_packets++;
		percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
		return;
	}

	/* Build skb */
	skb = dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, vaddr);
	if (!skb)
		/* Nothing else we can do, recycle the buffer and
		 * drop the frame.
		 */
		goto err_alloc_skb;

	/* Send the skb to the Linux networking stack */
	dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);

	return;

err_alloc_skb:
	dpaa2_eth_recycle_buf(priv, ch, addr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

static void dpaa2_xsk_set_bp_per_qdbin(struct dpaa2_eth_priv *priv,
				       struct dpni_pools_cfg *pools_params)
{
	int curr_bp = 0, i, j;

	pools_params->pool_options = DPNI_POOL_ASSOC_QDBIN;
	for (i = 0; i < priv->num_bps; i++) {
		for (j = 0; j < priv->num_channels; j++)
			if (priv->bp[i] == priv->channel[j]->bp)
				pools_params->pools[curr_bp].priority_mask |= (1 << j);
		if (!pools_params->pools[curr_bp].priority_mask)
			continue;

		pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;
		pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;
		pools_params->pools[curr_bp++].backup_pool = 0;
	}
	pools_params->num_dpbp = curr_bp;
}
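The per-QDBIN association built above gives each buffer pool a bitmask with one bit per channel that currently uses it. A standalone model of the same computation; NUM_CHANNELS and pool_of_channel are illustrative stand-ins for the driver state, not driver API:

/* Standalone sketch of the mask computation in dpaa2_xsk_set_bp_per_qdbin() */
#include <stdint.h>
#include <stdio.h>

#define NUM_CHANNELS 8
#define NUM_POOLS    2

int main(void)
{
	/* channel 2 runs zero-copy on pool 1, everything else on pool 0 */
	int pool_of_channel[NUM_CHANNELS] = { 0, 0, 1, 0, 0, 0, 0, 0 };
	uint8_t priority_mask[NUM_POOLS] = { 0 };
	int i;

	for (i = 0; i < NUM_CHANNELS; i++)
		priority_mask[pool_of_channel[i]] |= 1u << i;

	/* prints: pool 0 mask 0xfb, pool 1 mask 0x04 */
	for (i = 0; i < NUM_POOLS; i++)
		printf("pool %d mask 0x%02x\n", i, priority_mask[i]);
	return 0;
}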

static int dpaa2_xsk_disable_pool(struct net_device *dev, u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid);
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpni_pools_cfg pools_params = { 0 };
	struct dpaa2_eth_channel *ch;
	int err;
	bool up;

	ch = priv->channel[qid];
	if (!ch->xsk_pool)
		return -EINVAL;

	up = netif_running(dev);
	if (up)
		dev_close(dev);

	xsk_pool_dma_unmap(pool, 0);
	err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq,
					 MEM_TYPE_PAGE_ORDER0, NULL);
	if (err)
		netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n",
			   err);

	dpaa2_eth_free_dpbp(priv, ch->bp);

	ch->xsk_zc = false;
	ch->xsk_pool = NULL;
	ch->xsk_tx_pkts_sent = 0;
	ch->bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];

	dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_eth_rx);

	dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err)
		netdev_err(dev, "dpni_set_pools() failed\n");

	if (up) {
		err = dev_open(dev, NULL);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_xsk_enable_pool(struct net_device *dev,
				 struct xsk_buff_pool *pool,
				 u16 qid)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpni_pools_cfg pools_params = { 0 };
	struct dpaa2_eth_channel *ch;
	int err, err2;
	bool up;

	if (priv->dpni_attrs.wriop_version < DPAA2_WRIOP_VERSION(3, 0, 0)) {
		netdev_err(dev, "AF_XDP zero-copy not supported on devices older than WRIOP(3, 0, 0)\n");
		return -EOPNOTSUPP;
	}

	if (priv->dpni_attrs.num_queues > 8) {
		netdev_err(dev, "AF_XDP zero-copy not supported on DPNI with more than 8 queues\n");
		return -EOPNOTSUPP;
	}

	up = netif_running(dev);
	if (up)
		dev_close(dev);

	err = xsk_pool_dma_map(pool, priv->net_dev->dev.parent, 0);
	if (err) {
		netdev_err(dev, "xsk_pool_dma_map() failed (err = %d)\n",
			   err);
		goto err_dma_unmap;
	}

	ch = priv->channel[qid];
	err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
	if (err) {
		netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err);
		goto err_mem_model;
	}
	xsk_pool_set_rxq_info(pool, &ch->xdp_rxq);

	priv->bp[priv->num_bps] = dpaa2_eth_allocate_dpbp(priv);
	if (IS_ERR(priv->bp[priv->num_bps])) {
		err = PTR_ERR(priv->bp[priv->num_bps]);
		goto err_bp_alloc;
	}
	ch->xsk_zc = true;
	ch->xsk_pool = pool;
	ch->bp = priv->bp[priv->num_bps++];

	dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_xsk_rx);

	dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err) {
		netdev_err(dev, "dpni_set_pools() failed\n");
		goto err_set_pools;
	}

	if (up) {
		err = dev_open(dev, NULL);
		if (err)
			return err;
	}

	return 0;

err_set_pools:
	err2 = dpaa2_xsk_disable_pool(dev, qid);
	if (err2)
		netdev_err(dev, "dpaa2_xsk_disable_pool() failed %d\n", err2);
err_bp_alloc:
	err2 = xdp_rxq_info_reg_mem_model(&priv->channel[qid]->xdp_rxq,
					  MEM_TYPE_PAGE_ORDER0, NULL);
	if (err2)
		netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed with %d\n", err2);
err_mem_model:
	xsk_pool_dma_unmap(pool, 0);
err_dma_unmap:
	if (up)
		dev_open(dev, NULL);

	return err;
}

int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
{
	return pool ? dpaa2_xsk_enable_pool(dev, pool, qid) :
		      dpaa2_xsk_disable_pool(dev, qid);
}

int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_eth_channel *ch = priv->channel[qid];

	if (!priv->link_state.up)
		return -ENETDOWN;

	if (!priv->xdp_prog)
		return -EINVAL;

	if (!ch->xsk_zc)
		return -EINVAL;

	/* We do not have access to a per channel SW interrupt, so instead we
	 * schedule a NAPI instance.
	 */
	if (!napi_if_scheduled_mark_missed(&ch->napi))
		napi_schedule(&ch->napi);

	return 0;
}
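On the user side, this wakeup is driven by the need_wakeup protocol: the application only issues a syscall when the kernel asks for one. A sketch of the matching userspace kick, assuming the libxdp xsk API:

/* Userspace counterpart (sketch): the sendto() below ends up in the
 * driver's ndo_xsk_wakeup(), i.e. dpaa2_xsk_wakeup() here.
 */
#include <sys/socket.h>
#include <xdp/xsk.h>

static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
}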

static int dpaa2_xsk_tx_build_fd(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_channel *ch,
				 struct dpaa2_fd *fd,
				 struct xdp_desc *xdp_desc)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_sg_entry *sgt;
	struct dpaa2_eth_swa *swa;
	void *sgt_buf = NULL;
	dma_addr_t sgt_addr;
	int sgt_buf_size;
	dma_addr_t addr;
	int err = 0;

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
	sgt_buf = dpaa2_eth_sgt_get(priv);
	if (unlikely(!sgt_buf))
		return -ENOMEM;
	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Get the address of the XSK Tx buffer */
	addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr);
	xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len);

	/* Fill in the HW SGT structure */
	dpaa2_sg_set_addr(sgt, addr);
	dpaa2_sg_set_len(sgt, xdp_desc->len);
	dpaa2_sg_set_final(sgt, true);

	/* Store the necessary info in the SGT buffer */
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_XSK;
	swa->xsk.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
		err = -ENOMEM;
		goto sgt_map_failed;
	}

	/* Initialize FD fields */
	memset(fd, 0, sizeof(struct dpaa2_fd));
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, sgt_addr);
	dpaa2_fd_set_len(fd, xdp_desc->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

sgt_map_failed:
	dpaa2_eth_sgt_recycle(priv, sgt_buf);

	return err;
}

bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
		  struct dpaa2_eth_channel *ch)
{
	struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct rtnl_link_stats64 *percpu_stats;
	int budget = DPAA2_ETH_TX_ZC_PER_NAPI;
	int total_enqueued, enqueued;
	int retries, max_retries;
	struct dpaa2_eth_fq *fq;
	struct dpaa2_fd *fds;
	int batch, i, err;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	fds = (this_cpu_ptr(priv->fd))->array;

	/* Use the FQ with the same idx as the affine CPU */
	fq = &priv->fq[ch->nctx.desired_cpu];

	batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget);
	if (!batch)
		return false;

	/* Create a FD for each XSK frame to be sent */
	for (i = 0; i < batch; i++) {
		err = dpaa2_xsk_tx_build_fd(priv, ch, &fds[i], &xdp_descs[i]);
		if (err) {
			batch = i;
			break;
		}

		trace_dpaa2_tx_xsk_fd(priv->net_dev, &fds[i]);
	}

	/* Enqueue all the created FDs */
	max_retries = batch * DPAA2_ETH_ENQUEUE_RETRIES;
	total_enqueued = 0;
	enqueued = 0;
	retries = 0;
	while (total_enqueued < batch && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fds[total_enqueued], 0,
				    batch - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			retries++;
			continue;
		}

		total_enqueued += enqueued;
	}
	percpu_extras->tx_portal_busy += retries;

	/* Update statistics */
	percpu_stats->tx_packets += total_enqueued;
	for (i = 0; i < total_enqueued; i++)
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
	for (i = total_enqueued; i < batch; i++) {
		dpaa2_eth_free_tx_fd(priv, ch, fq, &fds[i], false);
		percpu_stats->tx_errors++;
	}

	xsk_tx_release(ch->xsk_pool);

	return total_enqueued == budget;
}
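The descriptors consumed here are produced by userspace on the AF_XDP Tx ring. A sketch of that producing side, again assuming the libxdp xsk API; buffer management is elided and the addr/len values are illustrative:

/* Userspace producer sketch feeding dpaa2_xsk_tx(): reserve a Tx
 * descriptor, point it at a frame already written into the UMEM,
 * submit, and kick the driver if it asked for a wakeup.
 */
#include <sys/socket.h>
#include <xdp/xsk.h>

static int submit_one_frame(struct xsk_socket *xsk, struct xsk_ring_prod *tx,
			    __u64 umem_addr, __u32 len)
{
	struct xdp_desc *desc;
	__u32 idx;

	if (xsk_ring_prod__reserve(tx, 1, &idx) != 1)
		return -1;	/* Tx ring full, reap completions first */

	desc = xsk_ring_prod__tx_desc(tx, idx);
	desc->addr = umem_addr;
	desc->len = len;
	xsk_ring_prod__submit(tx, 1);

	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
	return 0;
}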

drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h:

@@ -13,10 +13,12 @@
 #define DPNI_VER_MINOR				0
 #define DPNI_CMD_BASE_VERSION			1
 #define DPNI_CMD_2ND_VERSION			2
+#define DPNI_CMD_3RD_VERSION			3
 #define DPNI_CMD_ID_OFFSET			4

 #define DPNI_CMD(id)	(((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
 #define DPNI_CMD_V2(id)	(((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
+#define DPNI_CMD_V3(id)	(((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_3RD_VERSION)

 #define DPNI_CMDID_OPEN					DPNI_CMD(0x801)
 #define DPNI_CMDID_CLOSE				DPNI_CMD(0x800)
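As a quick sanity check of the versioning scheme: the low nibble of a command ID carries the command version, so SET_POOLS moves from 0x2001 (v1) to 0x2003 (v3). A standalone sketch:

/* Standalone check of the DPNI command-ID encoding shown above */
#include <stdio.h>

#define DPNI_CMD_BASE_VERSION	1
#define DPNI_CMD_3RD_VERSION	3
#define DPNI_CMD_ID_OFFSET	4
#define DPNI_CMD(id)	(((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
#define DPNI_CMD_V3(id)	(((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_3RD_VERSION)

int main(void)
{
	/* prints: SET_POOLS v1 0x2001, v3 0x2003 */
	printf("SET_POOLS v1 0x%x, v3 0x%x\n",
	       DPNI_CMD(0x200), DPNI_CMD_V3(0x200));
	return 0;
}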

@@ -39,7 +41,7 @@
 #define DPNI_CMDID_GET_IRQ_STATUS			DPNI_CMD(0x016)
 #define DPNI_CMDID_CLEAR_IRQ_STATUS			DPNI_CMD(0x017)

-#define DPNI_CMDID_SET_POOLS				DPNI_CMD(0x200)
+#define DPNI_CMDID_SET_POOLS				DPNI_CMD_V3(0x200)
 #define DPNI_CMDID_SET_ERRORS_BEHAVIOR			DPNI_CMD(0x20B)

 #define DPNI_CMDID_GET_QDID				DPNI_CMD(0x210)
@@ -115,14 +117,19 @@ struct dpni_cmd_open {
 };

 #define DPNI_BACKUP_POOL(val, order)	(((val) & 0x1) << (order))

+struct dpni_cmd_pool {
+	__le16 dpbp_id;
+	u8 priority_mask;
+	u8 pad;
+};
+
 struct dpni_cmd_set_pools {
-	/* cmd word 0 */
 	u8 num_dpbp;
 	u8 backup_pool_mask;
-	__le16 pad;
-	/* cmd word 0..4 */
-	__le32 dpbp_id[DPNI_MAX_DPBP];
-	/* cmd word 4..6 */
+	u8 pad;
+	u8 pool_options;
+	struct dpni_cmd_pool pool[DPNI_MAX_DPBP];
 	__le16 buffer_size[DPNI_MAX_DPBP];
 };

drivers/net/ethernet/freescale/dpaa2/dpni.c:

@@ -173,8 +173,12 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
 					  token);
 	cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
 	cmd_params->num_dpbp = cfg->num_dpbp;
+	cmd_params->pool_options = cfg->pool_options;
 	for (i = 0; i < DPNI_MAX_DPBP; i++) {
-		cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+		cmd_params->pool[i].dpbp_id =
+			cpu_to_le16(cfg->pools[i].dpbp_id);
+		cmd_params->pool[i].priority_mask =
+			cfg->pools[i].priority_mask;
 		cmd_params->buffer_size[i] =
 			cpu_to_le16(cfg->pools[i].buffer_size);
 		cmd_params->backup_pool_mask |=

drivers/net/ethernet/freescale/dpaa2/dpni.h:

@@ -92,19 +92,28 @@ int dpni_close(struct fsl_mc_io *mc_io,
 	       u32 cmd_flags,
 	       u16 token);

+#define DPNI_POOL_ASSOC_QPRI	0
+#define DPNI_POOL_ASSOC_QDBIN	1
+
 /**
  * struct dpni_pools_cfg - Structure representing buffer pools configuration
  * @num_dpbp: Number of DPBPs
+ * @pool_options: Buffer assignment options.
+ *	This field is a combination of the DPNI_POOL_ASSOC_ flags.
  * @pools: Array of buffer pools parameters; The number of valid entries
  *	must match 'num_dpbp' value
  * @pools.dpbp_id: DPBP object ID
+ * @pools.priority_mask: Priority mask that indicates which TCs use this buffer.
+ *	If set to 0x00, MC will assume the value 0xff.
  * @pools.buffer_size: Buffer size
  * @pools.backup_pool: Backup pool
  */
 struct dpni_pools_cfg {
 	u8 num_dpbp;
+	u8 pool_options;
 	struct {
 		int dpbp_id;
+		u8 priority_mask;
 		u16 buffer_size;
 		int backup_pool;
 	} pools[DPNI_MAX_DPBP];
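Putting the new fields together, a caller could describe a per-QDBIN layout like the one dpaa2_xsk_set_bp_per_qdbin() builds for the channel split modeled earlier (channel 2 on its own XSK pool). A sketch with illustrative dpbp_id and buffer_size values:

/* Sketch: per-QDBIN dpni_pools_cfg; values are illustrative, not real IDs */
struct dpni_pools_cfg cfg = {
	.num_dpbp = 2,
	.pool_options = DPNI_POOL_ASSOC_QDBIN,
	.pools = {
		{ .dpbp_id = 3, .priority_mask = 0xfb, .buffer_size = 2048 },
		{ .dpbp_id = 7, .priority_mask = 0x04, .buffer_size = 2048 },
	},
};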