mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-04-22 06:13:59 +00:00
net: place xmit recursion in softnet data
This fills a hole in softnet data, so no change in structure size. Also prepares for xmit_more placement in the same spot; skb->xmit_more will be removed in followup patch. Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
74dcb4c1a5
commit
97cdcf37b5
3 changed files with 38 additions and 18 deletions
|
@@ -2659,14 +2659,6 @@ void netdev_freemem(struct net_device *dev);
|
||||||
void synchronize_net(void);
|
void synchronize_net(void);
|
||||||
int init_dummy_netdev(struct net_device *dev);
|
int init_dummy_netdev(struct net_device *dev);
|
||||||
|
|
||||||
DECLARE_PER_CPU(int, xmit_recursion);
|
|
||||||
#define XMIT_RECURSION_LIMIT 10
|
|
||||||
|
|
||||||
static inline int dev_recursion_level(void)
|
|
||||||
{
|
|
||||||
return this_cpu_read(xmit_recursion);
|
|
||||||
}
|
|
||||||
|
|
||||||
struct net_device *dev_get_by_index(struct net *net, int ifindex);
|
struct net_device *dev_get_by_index(struct net *net, int ifindex);
|
||||||
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
|
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
|
||||||
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
|
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
|
||||||
|
@@ -3015,6 +3007,11 @@ struct softnet_data {
|
||||||
#ifdef CONFIG_XFRM_OFFLOAD
|
#ifdef CONFIG_XFRM_OFFLOAD
|
||||||
struct sk_buff_head xfrm_backlog;
|
struct sk_buff_head xfrm_backlog;
|
||||||
#endif
|
#endif
|
||||||
|
/* written and read only by owning cpu: */
|
||||||
|
struct {
|
||||||
|
u16 recursion;
|
||||||
|
u8 more;
|
||||||
|
} xmit;
|
||||||
#ifdef CONFIG_RPS
|
#ifdef CONFIG_RPS
|
||||||
/* input_queue_head should be written by cpu owning this struct,
|
/* input_queue_head should be written by cpu owning this struct,
|
||||||
* and only read by other cpus. Worth using a cache line.
|
* and only read by other cpus. Worth using a cache line.
|
||||||
|
@@ -3050,6 +3047,28 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
|
||||||
|
|
||||||
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
|
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
|
||||||
|
|
||||||
|
static inline int dev_recursion_level(void)
|
||||||
|
{
|
||||||
|
return __this_cpu_read(softnet_data.xmit.recursion);
|
||||||
|
}
|
||||||
|
|
||||||
|
#define XMIT_RECURSION_LIMIT 10
|
||||||
|
static inline bool dev_xmit_recursion(void)
|
||||||
|
{
|
||||||
|
return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
|
||||||
|
XMIT_RECURSION_LIMIT);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void dev_xmit_recursion_inc(void)
|
||||||
|
{
|
||||||
|
__this_cpu_inc(softnet_data.xmit.recursion);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void dev_xmit_recursion_dec(void)
|
||||||
|
{
|
||||||
|
__this_cpu_dec(softnet_data.xmit.recursion);
|
||||||
|
}
|
||||||
|
|
||||||
void __netif_schedule(struct Qdisc *q);
|
void __netif_schedule(struct Qdisc *q);
|
||||||
void netif_schedule_queue(struct netdev_queue *txq);
|
void netif_schedule_queue(struct netdev_queue *txq);
|
||||||
|
|
||||||
|
@@ -4409,6 +4428,11 @@ static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
|
||||||
return ops->ndo_start_xmit(skb, dev);
|
return ops->ndo_start_xmit(skb, dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline bool netdev_xmit_more(void)
|
||||||
|
{
|
||||||
|
return __this_cpu_read(softnet_data.xmit.more);
|
||||||
|
}
|
||||||
|
|
||||||
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
|
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
|
||||||
struct netdev_queue *txq, bool more)
|
struct netdev_queue *txq, bool more)
|
||||||
{
|
{
|
||||||
|
|
|
@@ -3566,9 +3566,6 @@ static void skb_update_prio(struct sk_buff *skb)
|
||||||
#define skb_update_prio(skb)
|
#define skb_update_prio(skb)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
DEFINE_PER_CPU(int, xmit_recursion);
|
|
||||||
EXPORT_SYMBOL(xmit_recursion);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* dev_loopback_xmit - loop back @skb
|
* dev_loopback_xmit - loop back @skb
|
||||||
* @net: network namespace this loopback is happening in
|
* @net: network namespace this loopback is happening in
|
||||||
|
@@ -3857,8 +3854,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
|
||||||
int cpu = smp_processor_id(); /* ok because BHs are off */
|
int cpu = smp_processor_id(); /* ok because BHs are off */
|
||||||
|
|
||||||
if (txq->xmit_lock_owner != cpu) {
|
if (txq->xmit_lock_owner != cpu) {
|
||||||
if (unlikely(__this_cpu_read(xmit_recursion) >
|
if (dev_xmit_recursion())
|
||||||
XMIT_RECURSION_LIMIT))
|
|
||||||
goto recursion_alert;
|
goto recursion_alert;
|
||||||
|
|
||||||
skb = validate_xmit_skb(skb, dev, &again);
|
skb = validate_xmit_skb(skb, dev, &again);
|
||||||
|
@@ -3868,9 +3864,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
|
||||||
HARD_TX_LOCK(dev, txq, cpu);
|
HARD_TX_LOCK(dev, txq, cpu);
|
||||||
|
|
||||||
if (!netif_xmit_stopped(txq)) {
|
if (!netif_xmit_stopped(txq)) {
|
||||||
__this_cpu_inc(xmit_recursion);
|
dev_xmit_recursion_inc();
|
||||||
skb = dev_hard_start_xmit(skb, dev, txq, &rc);
|
skb = dev_hard_start_xmit(skb, dev, txq, &rc);
|
||||||
__this_cpu_dec(xmit_recursion);
|
dev_xmit_recursion_dec();
|
||||||
if (dev_xmit_complete(rc)) {
|
if (dev_xmit_complete(rc)) {
|
||||||
HARD_TX_UNLOCK(dev, txq);
|
HARD_TX_UNLOCK(dev, txq);
|
||||||
goto out;
|
goto out;
|
||||||
|
|
|
@@ -2016,7 +2016,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
|
if (dev_xmit_recursion()) {
|
||||||
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
|
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
|
||||||
kfree_skb(skb);
|
kfree_skb(skb);
|
||||||
return -ENETDOWN;
|
return -ENETDOWN;
|
||||||
|
@@ -2024,9 +2024,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
|
||||||
|
|
||||||
skb->dev = dev;
|
skb->dev = dev;
|
||||||
|
|
||||||
__this_cpu_inc(xmit_recursion);
|
dev_xmit_recursion_inc();
|
||||||
ret = dev_queue_xmit(skb);
|
ret = dev_queue_xmit(skb);
|
||||||
__this_cpu_dec(xmit_recursion);
|
dev_xmit_recursion_dec();
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue