xsk: Move fill and completion rings to buffer pool

Move the fill and completion rings from the umem to the buffer
pool. This is done so that a later commit can share the umem
between multiple HW queue ids. In that case, one fill and
completion ring is needed per queue id. As the buffer pool is per
queue id and napi id, it is a natural place for these rings, and
one umem structure can then be shared between the buffer pools.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-5-git-send-email-magnus.karlsson@intel.com
Magnus Karlsson 2020-08-28 10:26:18 +02:00 committed by Daniel Borkmann
parent 1c1efc2af1
commit 7361f9c3d7
6 changed files with 52 additions and 49 deletions
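
Before the hunks, a minimal sketch of the ownership change described in the
commit message; the field lists are trimmed-down illustrations, not the exact
upstream definitions:

/* Simplified sketch of the new ownership; field lists are not the
 * exact upstream definitions.
 */
struct xdp_umem {
	/* No fill/completion rings here anymore, only the memory
	 * description that several buffer pools can now share.
	 */
	u64 size;
	u32 headroom;
	u32 chunk_size;
};

struct xsk_buff_pool {
	/* One pool per HW queue id, so the per-queue fill and
	 * completion rings live here.
	 */
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem *umem;	/* may be shared between pools */
};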

@@ -18,8 +18,6 @@ struct xsk_queue;
 struct xdp_buff;
 
 struct xdp_umem {
-	struct xsk_queue *fq;
-	struct xsk_queue *cq;
 	u64 size;
 	u32 headroom;
 	u32 chunk_size;
@@ -77,6 +75,8 @@ struct xdp_sock {
 	struct list_head map_list;
 	/* Protects map_list */
 	spinlock_t map_list_lock;
+	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
+	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
 };
 
 #ifdef CONFIG_XDP_SOCKETS
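
The fq_tmp/cq_tmp pointers added above only park the rings that user space
creates via the XDP_UMEM_FILL_RING and XDP_UMEM_COMPLETION_RING setsockopts
until bind() runs; at bind time the per-queue buffer pool takes them over.
A rough sketch of that handoff, using a hypothetical helper name (the real
logic sits in the bind path):

/* Hypothetical helper illustrating the bind-time handoff; only the
 * fq_tmp/cq_tmp names come from the patch, the rest is a sketch.
 */
static void xsk_sketch_adopt_rings(struct xsk_buff_pool *pool,
				   struct xdp_sock *xs)
{
	/* Rings were created via setsockopt() before bind() and
	 * parked in the socket; the buffer pool now owns them.
	 */
	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;
}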