Daniel Borkmann says:

====================
pull-request: bpf 2020-11-28

1) Do not take an extra reference on the skb for xsk's generic TX side,
   since when the skb is looped back into RX it might crash in generic
   XDP, from Björn Töpel.

2) Fix umem cleanup on a partially set up xsk socket when it is being
   destroyed, from Magnus Karlsson.

3) Fix an incorrect netdev reference count when the xsk_bind() operation
   fails, from Marek Majtyka.

4) Fix bpftool to set an error code on failed calloc() in
   build_btf_type_table(), from Zhen Lei.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Add MAINTAINERS entry for BPF LSM
  bpftool: Fix error return value in build_btf_type_table
  net, xsk: Avoid taking multiple skbuff references
  xsk: Fix incorrect netdev reference count
  xsk: Fix umem cleanup bug at socket destruct
  MAINTAINERS: Update XDP and AF_XDP entries
====================

Link: https://lore.kernel.org/r/20201128005104.1205-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 3771b82242

9 changed files with 61 additions and 23 deletions
MAINTAINERS

@@ -3355,6 +3355,17 @@ S:	Supported
 F:	arch/x86/net/
 X:	arch/x86/net/bpf_jit_comp32.c
 
+BPF LSM (Security Audit and Enforcement using BPF)
+M:	KP Singh <kpsingh@chromium.org>
+R:	Florent Revest <revest@chromium.org>
+R:	Brendan Jackman <jackmanb@chromium.org>
+L:	bpf@vger.kernel.org
+S:	Maintained
+F:	Documentation/bpf/bpf_lsm.rst
+F:	include/linux/bpf_lsm.h
+F:	kernel/bpf/bpf_lsm.c
+F:	security/bpf/
+
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:	Michael Chan <michael.chan@broadcom.com>
 L:	netdev@vger.kernel.org
@@ -19112,12 +19123,17 @@ L:	netdev@vger.kernel.org
 L:	bpf@vger.kernel.org
 S:	Supported
 F:	include/net/xdp.h
+F:	include/net/xdp_priv.h
 F:	include/trace/events/xdp.h
 F:	kernel/bpf/cpumap.c
 F:	kernel/bpf/devmap.c
 F:	net/core/xdp.c
-N:	xdp
-K:	xdp
+F:	samples/bpf/xdp*
+F:	tools/testing/selftests/bpf/*xdp*
+F:	tools/testing/selftests/bpf/*/*xdp*
+F:	drivers/net/ethernet/*/*/*/*/*xdp*
+F:	drivers/net/ethernet/*/*/*xdp*
+K:	(?:\b|_)xdp(?:\b|_)
 
 XDP SOCKETS (AF_XDP)
 M:	Björn Töpel <bjorn.topel@intel.com>
@@ -19126,9 +19142,12 @@ R:	Jonathan Lemon <jonathan.lemon@gmail.com>
 L:	netdev@vger.kernel.org
 L:	bpf@vger.kernel.org
 S:	Maintained
+F:	Documentation/networking/af_xdp.rst
 F:	include/net/xdp_sock*
 F:	include/net/xsk_buff_pool.h
 F:	include/uapi/linux/if_xdp.h
+F:	include/uapi/linux/xdp_diag.h
+F:	include/net/netns/xdp.h
 F:	net/xdp/
 F:	samples/bpf/xdpsock*
 F:	tools/lib/bpf/xsk*
include/linux/netdevice.h

@@ -2813,9 +2813,21 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
 		       struct net_device *sb_dev);
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
 		       struct net_device *sb_dev);
+
 int dev_queue_xmit(struct sk_buff *skb);
 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+
+static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+{
+	int ret;
+
+	ret = __dev_direct_xmit(skb, queue_id);
+	if (!dev_xmit_complete(ret))
+		kfree_skb(skb);
+	return ret;
+}
+
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 void unregister_netdevice_many(struct list_head *head);
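The split separates the two ownership contracts: dev_xmit_complete() is false only for NETDEV_TX_BUSY, so the new static inline dev_direct_xmit() keeps the historical "free the skb if the device was busy" behaviour for existing callers, while __dev_direct_xmit() leaves a busy skb to its caller. A minimal sketch of the caller-side difference, with an illustrative function name not taken from this patch:

#include <linux/netdevice.h>

/* Illustrative caller: with __dev_direct_xmit(), an skb rejected with
 * NETDEV_TX_BUSY is still owned by us, so we can retry or complete it
 * ourselves; dev_direct_xmit() would already have freed it here.
 */
static int demo_try_xmit(struct sk_buff *skb, u16 queue_id)
{
	int ret = __dev_direct_xmit(skb, queue_id);

	if (ret == NETDEV_TX_BUSY)
		return -EAGAIN;	/* skb untouched, caller decides its fate */

	return ret;		/* skb consumed by the stack */
}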
include/net/xdp_sock.h

@@ -31,6 +31,7 @@ struct xdp_umem {
 	struct page **pgs;
 	int id;
 	struct list_head xsk_dma_list;
+	struct work_struct work;
 };
 
 struct xsk_map {
net/core/dev.c

@@ -4180,7 +4180,7 @@ int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
 }
 EXPORT_SYMBOL(dev_queue_xmit_accel);
 
-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 {
 	struct net_device *dev = skb->dev;
 	struct sk_buff *orig_skb = skb;
@@ -4210,17 +4210,13 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 	dev_xmit_recursion_dec();
 
 	local_bh_enable();
-
-	if (!dev_xmit_complete(ret))
-		kfree_skb(skb);
-
 	return ret;
 drop:
 	atomic_long_inc(&dev->tx_dropped);
 	kfree_skb_list(skb);
 	return NET_XMIT_DROP;
 }
-EXPORT_SYMBOL(dev_direct_xmit);
+EXPORT_SYMBOL(__dev_direct_xmit);
 
 /*************************************************************************
  *			Receiver routines
net/xdp/xdp_umem.c

@@ -66,18 +66,31 @@ static void xdp_umem_release(struct xdp_umem *umem)
 	kfree(umem);
 }
 
+static void xdp_umem_release_deferred(struct work_struct *work)
+{
+	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);
+
+	xdp_umem_release(umem);
+}
+
 void xdp_get_umem(struct xdp_umem *umem)
 {
 	refcount_inc(&umem->users);
 }
 
-void xdp_put_umem(struct xdp_umem *umem)
+void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
 {
 	if (!umem)
 		return;
 
-	if (refcount_dec_and_test(&umem->users))
-		xdp_umem_release(umem);
+	if (refcount_dec_and_test(&umem->users)) {
+		if (defer_cleanup) {
+			INIT_WORK(&umem->work, xdp_umem_release_deferred);
+			schedule_work(&umem->work);
+		} else {
+			xdp_umem_release(umem);
+		}
+	}
 }
 
 static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
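The defer_cleanup flag exists because the final put can now also come from the socket destructor, a context where the umem teardown (which may sleep while unpinning pages) cannot safely run synchronously; in that case the release is bounced to a workqueue. A generic sketch of the same deferral idiom, with illustrative type and function names not taken from this patch:

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
	refcount_t users;
	struct work_struct work;
};

static void demo_release(struct demo_obj *obj)
{
	/* may sleep: unpin pages, undo accounting, etc. */
	kfree(obj);
}

static void demo_release_deferred(struct work_struct *work)
{
	struct demo_obj *obj = container_of(work, struct demo_obj, work);

	demo_release(obj);
}

static void demo_put(struct demo_obj *obj, bool defer_cleanup)
{
	if (refcount_dec_and_test(&obj->users)) {
		if (defer_cleanup) {
			/* last put came from atomic context: punt the
			 * sleeping teardown to process context.
			 */
			INIT_WORK(&obj->work, demo_release_deferred);
			schedule_work(&obj->work);
		} else {
			demo_release(obj);
		}
	}
}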
net/xdp/xdp_umem.h

@@ -9,7 +9,7 @@
 #include <net/xdp_sock_drv.h>
 
 void xdp_get_umem(struct xdp_umem *umem);
-void xdp_put_umem(struct xdp_umem *umem);
+void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup);
 struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr);
 
 #endif /* XDP_UMEM_H_ */
net/xdp/xsk.c

@@ -411,11 +411,7 @@ static int xsk_generic_xmit(struct sock *sk)
 		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
 		skb->destructor = xsk_destruct_skb;
 
-		/* Hinder dev_direct_xmit from freeing the packet and
-		 * therefore completing it in the destructor
-		 */
-		refcount_inc(&skb->users);
-		err = dev_direct_xmit(skb, xs->queue_id);
+		err = __dev_direct_xmit(skb, xs->queue_id);
 		if (err == NETDEV_TX_BUSY) {
 			/* Tell user-space to retry the send */
 			skb->destructor = sock_wfree;
@@ -429,12 +425,10 @@ static int xsk_generic_xmit(struct sock *sk)
 		/* Ignore NET_XMIT_CN as packet might have been sent */
 		if (err == NET_XMIT_DROP) {
 			/* SKB completed but not sent */
-			kfree_skb(skb);
 			err = -EBUSY;
 			goto out;
 		}
 
-		consume_skb(skb);
 		sent_frame = true;
 	}
 
@@ -1147,7 +1141,7 @@ static void xsk_destruct(struct sock *sk)
 		return;
 
 	if (!xp_put_pool(xs->pool))
-		xdp_put_umem(xs->umem);
+		xdp_put_umem(xs->umem, !xs->pool);
 
 	sk_refcnt_debug_dec(sk);
 }
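With the extra refcount_inc() gone, the skb has exactly one owner for its whole lifetime, so a packet looped back into RX by generic XDP no longer sees an unexpectedly elevated skb->users. A hedged sketch of the resulting contract, with illustrative names standing in for the real xsk helpers:

/* Illustrative sketch: destructor-based completion with a single skb
 * reference. demo_destruct_skb() stands in for xsk_destruct_skb().
 */
static void demo_destruct_skb(struct sk_buff *skb)
{
	/* runs exactly once, when the one and only reference is dropped:
	 * complete the TX descriptor back to the completion ring here.
	 */
	sock_wfree(skb);
}

static int demo_generic_xmit(struct sk_buff *skb, u16 queue_id)
{
	int err;

	skb->destructor = demo_destruct_skb;

	err = __dev_direct_xmit(skb, queue_id);
	if (err == NETDEV_TX_BUSY) {
		/* skb not consumed: drop our reference without
		 * "completing" the descriptor, and ask for a retry.
		 */
		skb->destructor = sock_wfree;
		kfree_skb(skb);
		return -EAGAIN;
	}
	/* skb consumed: NET_XMIT_DROP means it completed (the destructor
	 * ran) but was never sent, mirroring the -EBUSY mapping above.
	 */
	return err == NET_XMIT_DROP ? -EBUSY : 0;
}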
net/xdp/xsk_buff_pool.c

@@ -185,8 +185,10 @@ err_unreg_xsk:
 err_unreg_pool:
 	if (!force_zc)
 		err = 0; /* fallback to copy mode */
-	if (err)
+	if (err) {
 		xsk_clear_pool_at_qid(netdev, queue_id);
+		dev_put(netdev);
+	}
 	return err;
 }
 
@@ -242,7 +244,7 @@ static void xp_release_deferred(struct work_struct *work)
 		pool->cq = NULL;
 	}
 
-	xdp_put_umem(pool->umem);
+	xdp_put_umem(pool->umem, false);
 	xp_destroy(pool);
 }
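The netdev leak fix restores the usual pairing rule: once the bind path has taken a reference on the device, every error exit after that point must drop it with dev_put(), or the refcount leaks and the device can never be unregistered. The general idiom, sketched with hypothetical helper names:

#include <linux/netdevice.h>

static int demo_setup_queue(struct net_device *netdev, u16 queue_id)
{
	return 0;	/* stand-in for real queue setup */
}

/* Illustrative sketch of the pairing rule: a dev_hold() on entry must be
 * balanced by dev_put() on every failure path, not only on teardown.
 */
static int demo_assign_dev(struct net_device *netdev, u16 queue_id)
{
	int err;

	dev_hold(netdev);

	err = demo_setup_queue(netdev, queue_id);
	if (err)
		goto err_put;

	return 0;

err_put:
	dev_put(netdev);	/* balance the dev_hold() above */
	return err;
}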
tools/bpf/bpftool/btf.c

@@ -693,6 +693,7 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
 	obj_node = calloc(1, sizeof(*obj_node));
 	if (!obj_node) {
 		p_err("failed to allocate memory: %s", strerror(errno));
+		err = -ENOMEM;
 		goto err_free;
 	}
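The bpftool fix addresses a classic goto-cleanup pitfall: jumping to the shared error label without setting err first makes the function return whatever stale value err holds (here 0, i.e. success) even though calloc() failed. A minimal userspace sketch of the idiom, with hypothetical names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int demo_build_table(void)
{
	int err = 0;
	int *node;

	node = calloc(1, sizeof(*node));
	if (!node) {
		fprintf(stderr, "failed to allocate memory: %s\n",
			strerror(errno));
		err = -ENOMEM;	/* without this, we would return 0 (success) */
		goto err_free;
	}

	/* ... insert node into the table ... */
	return 0;

err_free:
	/* free anything partially built before this point */
	return err;
}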