net, xdp: Introduce xdp_prepare_buff utility routine

Introduce xdp_prepare_buff utility routine to initialize per-descriptor
xdp_buff fields (e.g. xdp_buff pointers). Rely on xdp_prepare_buff() in
all XDP-capable drivers.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Shay Agroskin <shayagr@amazon.com>
Acked-by: Martin Habets <habetsm.xilinx@gmail.com>
Acked-by: Camelia Groza <camelia.groza@nxp.com>
Acked-by: Marcin Wojtas <mw@semihalf.com>
Link: https://lore.kernel.org/bpf/45f46f12295972a97da8ca01990b3e71501e9d89.1608670965.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Author: Lorenzo Bianconi, 2020-12-22 22:09:29 +01:00
Committer: Alexei Starovoitov
commit be9df4aff6 (parent 43b5169d83)
28 changed files with 105 additions and 152 deletions
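The helper added to include/net/xdp.h below derives the packet start from
hard_start + headroom and fills the four per-descriptor xdp_buff pointers in
one call. As a quick illustration of those semantics, here is a minimal,
self-contained user-space sketch; the struct and function names are stand-ins
invented for this example, not the kernel definitions:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct xdp_buff data pointers. */
struct xdp_buff_sketch {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
};

/* Mirrors the pointer arithmetic of the new xdp_prepare_buff() helper. */
static void xdp_prepare_buff_sketch(struct xdp_buff_sketch *xdp,
				    unsigned char *hard_start,
				    int headroom, int data_len,
				    bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	/* data + 1 marks metadata as unsupported, the same convention
	 * xdp_set_data_meta_invalid() used before this change.
	 */
	xdp->data_meta = meta_valid ? data : data + 1;
}

int main(void)
{
	unsigned char frame[2048];
	struct xdp_buff_sketch xdp;

	/* e.g. 256 bytes of driver headroom, a 1000-byte packet, no metadata */
	xdp_prepare_buff_sketch(&xdp, frame, 256, 1000, false);
	printf("headroom=%td len=%td\n",
	       (unsigned char *)xdp.data - frame,
	       (unsigned char *)xdp.data_end - (unsigned char *)xdp.data);
	return 0;
}

Passing meta_valid as false therefore keeps the exact behaviour drivers
previously got from xdp_set_data_meta_invalid(), while drivers that do support
metadata (e.g. the Intel ones below) pass true so data_meta starts equal to
data.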

@@ -1585,10 +1585,9 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 	int ret;
 
 	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
-	xdp->data = page_address(rx_info->page) + rx_info->page_offset;
-	xdp_set_data_meta_invalid(xdp);
-	xdp->data_hard_start = page_address(rx_info->page);
-	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
+	xdp_prepare_buff(xdp, page_address(rx_info->page),
+			 rx_info->page_offset,
+			 rx_ring->ena_bufs[0].len, false);
 	/* If for some reason we received a bigger packet than
 	 * we expect, then we simply drop it
 	 */

@@ -135,10 +135,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	txr = rxr->bnapi->tx_ring;
 	/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
 	xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
-	xdp.data_hard_start = *data_ptr - offset;
-	xdp.data = *data_ptr;
-	xdp_set_data_meta_invalid(&xdp);
-	xdp.data_end = *data_ptr + *len;
+	xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
 	orig_data = xdp.data;
 
 	rcu_read_lock();

@@ -530,6 +530,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 				struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
 				struct rcv_queue *rq, struct sk_buff **skb)
 {
+	unsigned char *hard_start, *data;
 	struct xdp_buff xdp;
 	struct page *page;
 	u32 action;
@@ -549,10 +550,9 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 	xdp_init_buff(&xdp, RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
 		      &rq->xdp_rxq);
-	xdp.data_hard_start = page_address(page);
-	xdp.data = (void *)cpu_addr;
-	xdp_set_data_meta_invalid(&xdp);
-	xdp.data_end = xdp.data + len;
+	hard_start = page_address(page);
+	data = (unsigned char *)cpu_addr;
+	xdp_prepare_buff(&xdp, hard_start, data - hard_start, len, false);
 	orig_data = xdp.data;
 
 	rcu_read_lock();

@@ -2534,10 +2534,8 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
 	xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
 		      &dpaa_fq->xdp_rxq);
-	xdp.data = vaddr + fd_off;
-	xdp.data_meta = xdp.data;
-	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
-	xdp.data_end = xdp.data + qm_fd_get_length(fd);
+	xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
+			 XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true);
 
 	/* We reserve a fixed headroom of 256 bytes under the erratum and we
 	 * offer it all to XDP programs to use. If no room is left for the

@@ -350,7 +350,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
 	struct bpf_prog *xdp_prog;
 	struct xdp_buff xdp;
 	u32 xdp_act = XDP_PASS;
-	int err;
+	int err, offset;
 
 	rcu_read_lock();
@@ -358,14 +358,10 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
 	if (!xdp_prog)
 		goto out;
 
-	xdp_init_buff(&xdp,
-		      DPAA2_ETH_RX_BUF_RAW_SIZE -
-		      (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM),
-		      &ch->xdp_rxq);
-	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
-	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
-	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
-	xdp_set_data_meta_invalid(&xdp);
+	offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
+	xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
+	xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
+			 dpaa2_fd_get_len(fd), false);
 
 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

@@ -2406,12 +2406,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		/* retrieve a buffer from the ring */
 		if (!skb) {
-			xdp.data = page_address(rx_buffer->page) +
-				   rx_buffer->page_offset;
-			xdp.data_meta = xdp.data;
-			xdp.data_hard_start = xdp.data -
-					      i40e_rx_offset(rx_ring);
-			xdp.data_end = xdp.data + size;
+			unsigned int offset = i40e_rx_offset(rx_ring);
+			unsigned char *hard_start;
+
+			hard_start = page_address(rx_buffer->page) +
+				     rx_buffer->page_offset - offset;
+			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
 			/* At larger PAGE_SIZE, frame_sz depend on len size */
 			xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);

@@ -1104,8 +1104,10 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 	/* start the loop to process Rx packets bounded by 'budget' */
 	while (likely(total_rx_pkts < (unsigned int)budget)) {
+		unsigned int offset = ice_rx_offset(rx_ring);
 		union ice_32b_rx_flex_desc *rx_desc;
 		struct ice_rx_buf *rx_buf;
+		unsigned char *hard_start;
 		struct sk_buff *skb;
 		unsigned int size;
 		u16 stat_err_bits;
@@ -1151,10 +1153,9 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 			goto construct_skb;
 		}
 
-		xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
-		xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
-		xdp.data_meta = xdp.data;
-		xdp.data_end = xdp.data + size;
+		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+			     offset;
+		xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
 		/* At larger PAGE_SIZE, frame_sz depend on len size */
 		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);

@@ -8715,12 +8715,12 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 		/* retrieve a buffer from the ring */
 		if (!skb) {
-			xdp.data = page_address(rx_buffer->page) +
-				   rx_buffer->page_offset;
-			xdp.data_meta = xdp.data;
-			xdp.data_hard_start = xdp.data -
-					      igb_rx_offset(rx_ring);
-			xdp.data_end = xdp.data + size;
+			unsigned int offset = igb_rx_offset(rx_ring);
+			unsigned char *hard_start;
+
+			hard_start = page_address(rx_buffer->page) +
+				     rx_buffer->page_offset - offset;
+			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
 			/* At larger PAGE_SIZE, frame_sz depend on len size */
 			xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);

@@ -2335,12 +2335,12 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		/* retrieve a buffer from the ring */
 		if (!skb) {
-			xdp.data = page_address(rx_buffer->page) +
-				   rx_buffer->page_offset;
-			xdp.data_meta = xdp.data;
-			xdp.data_hard_start = xdp.data -
-					      ixgbe_rx_offset(rx_ring);
-			xdp.data_end = xdp.data + size;
+			unsigned int offset = ixgbe_rx_offset(rx_ring);
+			unsigned char *hard_start;
+
+			hard_start = page_address(rx_buffer->page) +
+				     rx_buffer->page_offset - offset;
+			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
 			/* At larger PAGE_SIZE, frame_sz depend on len size */
 			xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);

@@ -1160,12 +1160,12 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		/* retrieve a buffer from the ring */
 		if (!skb) {
-			xdp.data = page_address(rx_buffer->page) +
-				   rx_buffer->page_offset;
-			xdp.data_meta = xdp.data;
-			xdp.data_hard_start = xdp.data -
-					      ixgbevf_rx_offset(rx_ring);
-			xdp.data_end = xdp.data + size;
+			unsigned int offset = ixgbevf_rx_offset(rx_ring);
+			unsigned char *hard_start;
+
+			hard_start = page_address(rx_buffer->page) +
+				     rx_buffer->page_offset - offset;
+			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
 			/* At larger PAGE_SIZE, frame_sz depend on len size */
 			xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);

@@ -2263,11 +2263,8 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	/* Prefetch header */
 	prefetch(data);
 
-	xdp->data_hard_start = data;
-	xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
-	xdp->data_end = xdp->data + data_len;
-	xdp_set_data_meta_invalid(xdp);
+	xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
+			 data_len, false);
 
 	sinfo = xdp_get_shared_info_from_buff(xdp);
 	sinfo->nr_frags = 0;

@@ -3565,17 +3565,15 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
 		if (xdp_prog) {
 			struct xdp_rxq_info *xdp_rxq;
 
-			xdp.data_hard_start = data;
-			xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
-			xdp.data_end = xdp.data + rx_bytes;
-
 			if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
 				xdp_rxq = &rxq->xdp_rxq_short;
 			else
 				xdp_rxq = &rxq->xdp_rxq_long;
 
 			xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
-			xdp_set_data_meta_invalid(&xdp);
+			xdp_prepare_buff(&xdp, data,
+					 MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
+					 rx_bytes, false);
 
 			ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);

@@ -776,10 +776,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget
 						priv->frag_info[0].frag_size,
 						DMA_FROM_DEVICE);
 
-			xdp.data_hard_start = va - frags[0].page_offset;
-			xdp.data = va;
-			xdp_set_data_meta_invalid(&xdp);
-			xdp.data_end = xdp.data + length;
+			xdp_prepare_buff(&xdp, va - frags[0].page_offset,
+					 frags[0].page_offset, length, false);
 			orig_data = xdp.data;
 
 			act = bpf_prog_run_xdp(xdp_prog, &xdp);

@@ -1127,10 +1127,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
 				u32 len, struct xdp_buff *xdp)
 {
 	xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
-	xdp->data_hard_start = va;
-	xdp->data = va + headroom;
-	xdp_set_data_meta_invalid(xdp);
-	xdp->data_end = xdp->data + len;
+	xdp_prepare_buff(xdp, va, headroom, len, false);
 }
 
 static struct sk_buff *

@@ -1914,10 +1914,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 		unsigned int dma_off;
 		int act;
 
-		xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
-		xdp.data = orig_data;
-		xdp.data_meta = orig_data;
-		xdp.data_end = orig_data + pkt_len;
+		xdp_prepare_buff(&xdp,
+				 rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+				 pkt_off - NFP_NET_RX_BUF_HEADROOM,
+				 pkt_len, true);
 
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);

@@ -1091,10 +1091,8 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 	enum xdp_action act;
 
 	xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
-	xdp.data_hard_start = page_address(bd->data);
-	xdp.data = xdp.data_hard_start + *data_offset;
-	xdp_set_data_meta_invalid(&xdp);
-	xdp.data_end = xdp.data + *len;
+	xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
+			 *len, false);
 
 	/* Queues always have a full reset currently, so for the time
 	 * being until there's atomic program replace just mark read

@@ -294,12 +294,9 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
 		       efx->rx_prefix_size);
 
 	xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
-	xdp.data = *ehp;
-	xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;
-
 	/* No support yet for XDP metadata */
-	xdp_set_data_meta_invalid(&xdp);
-	xdp.data_end = xdp.data + rx_buf->len;
+	xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
+			 rx_buf->len, false);
 
 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
 	rcu_read_unlock();

@@ -1015,10 +1015,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 					dma_dir);
 		prefetch(desc->addr);
 
-		xdp.data_hard_start = desc->addr;
-		xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
-		xdp_set_data_meta_invalid(&xdp);
-		xdp.data_end = xdp.data + pkt_len;
+		xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
+				 pkt_len, false);
 
 		if (xdp_prog) {
 			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);

@@ -392,21 +392,15 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	}
 
 	if (priv->xdp_prog) {
-		xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
+		int headroom = CPSW_HEADROOM, size = len;
 
+		xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
 		if (status & CPDMA_RX_VLAN_ENCAP) {
-			xdp.data = pa + CPSW_HEADROOM +
-				   CPSW_RX_VLAN_ENCAP_HDR_SIZE;
-			xdp.data_end = xdp.data + len -
-				       CPSW_RX_VLAN_ENCAP_HDR_SIZE;
-		} else {
-			xdp.data = pa + CPSW_HEADROOM;
-			xdp.data_end = xdp.data + len;
+			headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+			size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
 		}
 
-		xdp_set_data_meta_invalid(&xdp);
-		xdp.data_hard_start = pa;
+		xdp_prepare_buff(&xdp, pa, headroom, size, false);
 
 		port = priv->emac_port + cpsw->data.dual_emac;
 		ret = cpsw_run_xdp(priv, ch, &xdp, page, port);

@@ -335,21 +335,15 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	}
 
 	if (priv->xdp_prog) {
-		xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
+		int headroom = CPSW_HEADROOM, size = len;
 
+		xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
 		if (status & CPDMA_RX_VLAN_ENCAP) {
-			xdp.data = pa + CPSW_HEADROOM +
-				   CPSW_RX_VLAN_ENCAP_HDR_SIZE;
-			xdp.data_end = xdp.data + len -
-				       CPSW_RX_VLAN_ENCAP_HDR_SIZE;
-		} else {
-			xdp.data = pa + CPSW_HEADROOM;
-			xdp.data_end = xdp.data + len;
+			headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+			size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
 		}
 
-		xdp_set_data_meta_invalid(&xdp);
-		xdp.data_hard_start = pa;
+		xdp_prepare_buff(&xdp, pa, headroom, size, false);
 
 		ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
 		if (ret != CPSW_XDP_PASS)

@@ -45,10 +45,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
 	}
 
 	xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
-	xdp->data_hard_start = page_address(page);
-	xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
-	xdp_set_data_meta_invalid(xdp);
-	xdp->data_end = xdp->data + len;
+	xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);
 
 	memcpy(xdp->data, data, len);

@@ -1600,10 +1600,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		u32 act;
 
 		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
-		xdp.data_hard_start = buf;
-		xdp.data = buf + pad;
-		xdp_set_data_meta_invalid(&xdp);
-		xdp.data_end = xdp.data + len;
+		xdp_prepare_buff(&xdp, buf, pad, len, false);
 
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 		if (act == XDP_REDIRECT || act == XDP_TX) {

@@ -710,15 +710,11 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
 		skb = nskb;
 	}
 
-	xdp.data_hard_start = skb->head;
-	xdp.data = skb_mac_header(skb);
-	xdp.data_end = xdp.data + pktlen;
-	xdp.data_meta = xdp.data;
-
 	/* SKB "head" area always have tailroom for skb_shared_info */
-	frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
+	frame_sz = skb_end_pointer(skb) - skb->head;
 	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
+	xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);
 
 	orig_data = xdp.data;
 	orig_data_end = xdp.data_end;

@@ -690,10 +690,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		}
 
 		xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
-		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
-		xdp.data = xdp.data_hard_start + xdp_headroom;
-		xdp.data_end = xdp.data + len;
-		xdp.data_meta = xdp.data;
+		xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
+				 xdp_headroom, len, true);
 		orig_data = xdp.data;
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 		stats->xdp_packets++;
@@ -859,10 +857,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		 */
 		data = page_address(xdp_page) + offset;
 		xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
-		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
-		xdp.data = data + vi->hdr_len;
-		xdp.data_end = xdp.data + (len - vi->hdr_len);
-		xdp.data_meta = xdp.data;
+		xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
+				 VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
 
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 		stats->xdp_packets++;

@@ -866,10 +866,8 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
 	xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
 		      &queue->xdp_rxq);
-	xdp->data_hard_start = page_address(pdata);
-	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
-	xdp_set_data_meta_invalid(xdp);
-	xdp->data_end = xdp->data + len;
+	xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
+			 len, false);
 
 	act = bpf_prog_run_xdp(prog, xdp);
 	switch (act) {

@@ -83,6 +83,18 @@ xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
 	xdp->rxq = rxq;
 }
 
+static __always_inline void
+xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
+		 int headroom, int data_len, const bool meta_valid)
+{
+	unsigned char *data = hard_start + headroom;
+
+	xdp->data_hard_start = hard_start;
+	xdp->data = data;
+	xdp->data_end = data + data_len;
+	xdp->data_meta = meta_valid ? data : data + 1;
+}
+
 /* Reserve memory area at end-of data area.
  *
  * This macro reserves tailroom in the XDP buffer by limiting the

@@ -636,14 +636,11 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	if (IS_ERR(data))
 		return PTR_ERR(data);
 
-	xdp.data_hard_start = data;
-	xdp.data = data + headroom;
-	xdp.data_meta = xdp.data;
-	xdp.data_end = xdp.data + size;
 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
 	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
 		      &rxqueue->xdp_rxq);
+	xdp_prepare_buff(&xdp, data, headroom, size, true);
 
 	bpf_prog_change_xdp(NULL, prog);
 	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
 	if (ret)

@@ -4603,14 +4603,14 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 				     struct xdp_buff *xdp,
 				     struct bpf_prog *xdp_prog)
 {
+	void *orig_data, *orig_data_end, *hard_start;
 	struct netdev_rx_queue *rxqueue;
-	void *orig_data, *orig_data_end;
 	u32 metalen, act = XDP_DROP;
 	u32 mac_len, frame_sz;
 	__be16 orig_eth_type;
 	struct ethhdr *eth;
 	bool orig_bcast;
-	int hlen, off;
+	int off;
 
 	/* Reinjected packets coming from act_mirred or similar should
 	 * not get XDP generic processing.
@@ -4642,25 +4642,23 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	 * header.
 	 */
 	mac_len = skb->data - skb_mac_header(skb);
-	hlen = skb_headlen(skb) + mac_len;
-	xdp->data = skb->data - mac_len;
-	xdp->data_meta = xdp->data;
-	xdp->data_end = xdp->data + hlen;
-	xdp->data_hard_start = skb->data - skb_headroom(skb);
+	hard_start = skb->data - skb_headroom(skb);
 
 	/* SKB "head" area always have tailroom for skb_shared_info */
-	frame_sz = (void *)skb_end_pointer(skb) - xdp->data_hard_start;
+	frame_sz = (void *)skb_end_pointer(skb) - hard_start;
 	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
+	rxqueue = netif_get_rxqueue(skb);
+	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
+	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
+			 skb_headlen(skb) + mac_len, true);
+
 	orig_data_end = xdp->data_end;
 	orig_data = xdp->data;
 	eth = (struct ethhdr *)xdp->data;
 	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
 	orig_eth_type = eth->h_proto;
 
-	rxqueue = netif_get_rxqueue(skb);
-	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
-
 	act = bpf_prog_run_xdp(xdp_prog, xdp);
 
 	/* check if bpf_xdp_adjust_head was used */