Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-22 06:32:08 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several conflicts here.

NFP driver bug fix adding nfp_netdev_is_nfp_repr() check to nfp_fl_output() needed some adjustments because the code block is in an else block now.

Parallel additions to net/pkt_cls.h and net/sch_generic.h

A bug fix in __tcp_retransmit_skb() conflicted with some of the rbtree changes in net-next.

The tc action RCU callback fixes in 'net' had some overlap with some of the recent tcf_block reworking.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit e1ea2f9856
252 changed files with 2334 additions and 2095 deletions
@@ -96,6 +96,14 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
 	return rcu_dereference_sk_user_data(sk);
 }
 
+/* compute the linear packet data range [data, data_end) for skb when
+ * sk_skb type programs are in use.
+ */
+static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
+{
+	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
+}
+
 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
 {
 	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
@@ -117,7 +125,8 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
 	preempt_enable();
 	skb->sk = NULL;
 
-	return rc;
+	return rc == SK_PASS ?
+		(TCP_SKB_CB(skb)->bpf.map ? SK_REDIRECT : SK_PASS) : SK_DROP;
 }
 
 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
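The two hunks above are the sockmap side of the conflict (kernel/bpf/sockmap.c in this era): bpf_compute_data_end_sk_skb() records the linear [data, data_end) range for sk_skb programs, and smap_verdict_func() now translates a raw SK_PASS verdict into SK_REDIRECT when the program selected a redirect map. Below is a minimal stand-alone sketch of just that verdict mapping; the struct, helper name, and enum values are invented for illustration and are not the kernel's types.

/* Stand-alone sketch of the verdict mapping above: a program verdict of
 * SK_PASS is promoted to SK_REDIRECT when a redirect map entry was chosen,
 * anything other than SK_PASS becomes SK_DROP. Illustrative types only. */
#include <stdio.h>

enum verdict { SK_DROP = 0, SK_PASS, SK_REDIRECT };

struct skb_bpf_cb {
	void *map;	/* non-NULL when the program called a redirect helper */
};

static enum verdict map_verdict(int rc, const struct skb_bpf_cb *cb)
{
	/* Mirrors: rc == SK_PASS ? (map ? SK_REDIRECT : SK_PASS) : SK_DROP */
	return rc == SK_PASS ? (cb->map ? SK_REDIRECT : SK_PASS) : SK_DROP;
}

int main(void)
{
	struct skb_bpf_cb with_map = { .map = (void *)1 };
	struct skb_bpf_cb without_map = { .map = NULL };

	printf("%d %d %d\n",
	       map_verdict(SK_PASS, &with_map),		/* 2: redirect */
	       map_verdict(SK_PASS, &without_map),	/* 1: plain pass */
	       map_verdict(SK_DROP, &with_map));	/* 0: drop */
	return 0;
}

Read this as: a pass verdict with a chosen map entry becomes a redirect, a pass without one stays a pass, and everything else is dropped.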
@@ -632,6 +632,11 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
 		__cpuhp_kick_ap(st);
 	}
 
+	/*
+	 * Clean up the leftovers so the next hotplug operation wont use stale
+	 * data.
+	 */
+	st->node = st->last = NULL;
 	return ret;
 }
 
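The hunk above (apparently the CPU hotplug core, kernel/cpu.c) clears st->node and st->last so the next hotplug operation cannot pick up stale rollback state. The pattern is simply "reset per-operation cursors before returning"; the sketch below shows it with invented types (struct op_state, run_operation), not the kernel's cpuhp machinery.

/* Sketch of the "clear leftover cursor state" pattern: the state struct
 * keeps node/last pointers only for the duration of one operation and is
 * reset before the next one can run. Illustrative types only. */
#include <assert.h>
#include <stddef.h>

struct node { int id; };

struct op_state {
	struct node *node;	/* instance currently being processed */
	struct node *last;	/* rollback position on failure */
};

static int run_operation(struct op_state *st, struct node *n)
{
	st->node = n;		/* remember where we are while working */
	/* ... invoke callbacks, possibly rolling back via st->last ... */

	/* Clean up the leftovers so the next operation won't see stale data. */
	st->node = st->last = NULL;
	return 0;
}

int main(void)
{
	struct op_state st = { 0 };
	struct node n = { .id = 1 };

	run_operation(&st, &n);
	assert(st.node == NULL && st.last == NULL);
	return 0;
}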
@@ -135,17 +135,26 @@ void irq_gc_ack_clr_bit(struct irq_data *d)
 }
 
 /**
- * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
+ * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
  * @d: irq_data
+ *
+ * This generic implementation of the irq_mask_ack method is for chips
+ * with separate enable/disable registers instead of a single mask
+ * register and where a pending interrupt is acknowledged by setting a
+ * bit.
+ *
+ * Note: This is the only permutation currently used. Similar generic
+ * functions should be added here if other permutations are required.
  */
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	u32 mask = d->mask;
 
 	irq_gc_lock(gc);
-	irq_reg_writel(gc, mask, ct->regs.mask);
+	irq_reg_writel(gc, mask, ct->regs.disable);
+	*ct->mask_cache &= ~mask;
 	irq_reg_writel(gc, mask, ct->regs.ack);
 	irq_gc_unlock(gc);
 }
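Besides the rename, the hunk above fixes the body of the handler: the mask is written to the separate disable register rather than the mask register, the cached mask is kept coherent, and the pending interrupt is acked by setting its bit. The stand-alone sketch below models that register sequence with plain variables; fake_regs and mask_disable_and_ack_set() are illustrative stand-ins, not the genirq API, and the irq_gc_lock()/irq_gc_unlock() pair is omitted.

/* Model of the sequence in irq_gc_mask_disable_and_ack_set(): write the irq
 * mask to a separate disable register, drop the bits from the cached mask,
 * then acknowledge by setting the bit in the ack register. Registers are
 * plain variables here; this is an illustration, not driver code. */
#include <assert.h>
#include <stdint.h>

struct fake_regs {
	uint32_t disable;	/* write-1-to-disable */
	uint32_t ack;		/* write-1-to-ack */
};

static void mask_disable_and_ack_set(struct fake_regs *regs,
				     uint32_t *mask_cache, uint32_t mask)
{
	/* Normally done under the generic-chip lock; omitted in this sketch. */
	regs->disable = mask;		/* disable the interrupt line(s) */
	*mask_cache &= ~mask;		/* keep the cached mask coherent */
	regs->ack = mask;		/* ack the pending interrupt by setting the bit */
}

int main(void)
{
	struct fake_regs regs = { 0 };
	uint32_t cache = 0xff;

	mask_disable_and_ack_set(&regs, &cache, 1u << 3);
	assert(cache == 0xf7 && regs.disable == 0x08 && regs.ack == 0x08);
	return 0;
}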
@@ -68,6 +68,7 @@ enum {
 	 * attach_mutex to avoid changing binding state while
 	 * worker_attach_to_pool() is in progress.
 	 */
+	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
 
 	/* worker flags */
@@ -165,7 +166,6 @@ struct worker_pool {
 						/* L: hash of busy workers */
 
 	/* see manage_workers() for details on the two manager mutexes */
-	struct mutex		manager_arb;	/* manager arbitration */
 	struct worker		*manager;	/* L: purely informational */
 	struct mutex		attach_mutex;	/* attach/detach exclusion */
 	struct list_head	workers;	/* A: attached workers */
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-	bool managing = mutex_is_locked(&pool->manager_arb);
+	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 	int nr_busy = pool->nr_workers - nr_idle;
 
@@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker)
 {
 	struct worker_pool *pool = worker->pool;
 
-	/*
-	 * Anyone who successfully grabs manager_arb wins the arbitration
-	 * and becomes the manager. mutex_trylock() on pool->manager_arb
-	 * failure while holding pool->lock reliably indicates that someone
-	 * else is managing the pool and the worker which failed trylock
-	 * can proceed to executing work items. This means that anyone
-	 * grabbing manager_arb is responsible for actually performing
-	 * manager duties. If manager_arb is grabbed and released without
-	 * actual management, the pool may stall indefinitely.
-	 */
-	if (!mutex_trylock(&pool->manager_arb))
+	if (pool->flags & POOL_MANAGER_ACTIVE)
 		return false;
 
+	pool->flags |= POOL_MANAGER_ACTIVE;
 	pool->manager = worker;
 
 	maybe_create_worker(pool);
 
 	pool->manager = NULL;
-	mutex_unlock(&pool->manager_arb);
+	pool->flags &= ~POOL_MANAGER_ACTIVE;
+	wake_up(&wq_manager_wait);
 	return true;
 }
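The workqueue hunks above replace the pool->manager_arb mutex with a POOL_MANAGER_ACTIVE flag that is tested and set under pool->lock, and add wq_manager_wait so that anyone waiting for the manager role to go away can be woken when it is released. A rough user-space analogue of manage_workers() built on a pthread mutex and condition variable is sketched below; struct pool and manage() are invented for the example and only mimic the shape of the kernel code.

/* Pthread sketch of the arbitration pattern: a MANAGER_ACTIVE flag is
 * tested and set under the pool lock instead of a trylock'd mutex, and a
 * condition variable stands in for the wait queue used by teardown. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define POOL_MANAGER_ACTIVE (1 << 0)

struct pool {
	pthread_mutex_t lock;		/* stands in for pool->lock */
	pthread_cond_t manager_wait;	/* stands in for wq_manager_wait */
	unsigned int flags;
};

/* Caller holds pool->lock, like manage_workers() in the hunk above. */
static bool manage(struct pool *pool)
{
	if (pool->flags & POOL_MANAGER_ACTIVE)
		return false;		/* someone else is already managing */

	pool->flags |= POOL_MANAGER_ACTIVE;

	/* ... create/destroy workers as needed; dropping the lock here would
	 * be fine, the flag keeps other would-be managers out ... */

	pool->flags &= ~POOL_MANAGER_ACTIVE;
	pthread_cond_broadcast(&pool->manager_wait);	/* wake_up(&wq_manager_wait) */
	return true;
}

int main(void)
{
	struct pool pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.manager_wait = PTHREAD_COND_INITIALIZER,
	};

	pthread_mutex_lock(&pool.lock);
	printf("managed: %d\n", manage(&pool));	/* 1 */
	pthread_mutex_unlock(&pool.lock);
	return 0;
}

The design point is the same as in the kernel change: arbitration needs no dedicated mutex once the flag is only touched under the existing pool lock.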
@@ -3248,7 +3242,6 @@ static int init_worker_pool(struct worker_pool *pool)
 	setup_timer(&pool->mayday_timer, pool_mayday_timeout,
 		    (unsigned long)pool);
 
-	mutex_init(&pool->manager_arb);
 	mutex_init(&pool->attach_mutex);
 	INIT_LIST_HEAD(&pool->workers);
 
@@ -3318,13 +3311,15 @@ static void put_unbound_pool(struct worker_pool *pool)
 	hash_del(&pool->hash_node);
 
 	/*
-	 * Become the manager and destroy all workers. Grabbing
-	 * manager_arb prevents @pool's workers from blocking on
-	 * attach_mutex.
+	 * Become the manager and destroy all workers. This prevents
+	 * @pool's workers from blocking on attach_mutex. We're the last
+	 * manager and @pool gets freed with the flag set.
 	 */
-	mutex_lock(&pool->manager_arb);
-
 	spin_lock_irq(&pool->lock);
+	wait_event_lock_irq(wq_manager_wait,
+			    !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+	pool->flags |= POOL_MANAGER_ACTIVE;
+
 	while ((worker = first_idle_worker(pool)))
 		destroy_worker(worker);
 	WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3338,8 +3333,6 @@ static void put_unbound_pool(struct worker_pool *pool)
 	if (pool->detach_completion)
 		wait_for_completion(pool->detach_completion);
 
-	mutex_unlock(&pool->manager_arb);
-
 	/* shut down the timers */
 	del_timer_sync(&pool->idle_timer);
 	del_timer_sync(&pool->mayday_timer);
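put_unbound_pool() now claims the manager role for teardown by sleeping on wq_manager_wait until POOL_MANAGER_ACTIVE clears and then setting the flag itself, instead of taking manager_arb. The sketch below mirrors that wait-and-claim step using the same illustrative pthread-based struct pool as in the previous sketch; claim_manager_for_teardown() is an invented name.

/* Wait under the pool lock until no manager is active, then take the
 * manager role by setting the flag. Illustrative pthread analogue only. */
#include <pthread.h>

#define POOL_MANAGER_ACTIVE (1 << 0)

struct pool {
	pthread_mutex_t lock;
	pthread_cond_t manager_wait;
	unsigned int flags;
};

static void claim_manager_for_teardown(struct pool *pool)
{
	pthread_mutex_lock(&pool->lock);

	/* wait_event_lock_irq(wq_manager_wait, !(flags & POOL_MANAGER_ACTIVE), lock) */
	while (pool->flags & POOL_MANAGER_ACTIVE)
		pthread_cond_wait(&pool->manager_wait, &pool->lock);

	/* We are the last manager; workers can now be destroyed safely. */
	pool->flags |= POOL_MANAGER_ACTIVE;

	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct pool pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.manager_wait = PTHREAD_COND_INITIALIZER,
	};

	claim_manager_for_teardown(&pool);
	return (pool.flags & POOL_MANAGER_ACTIVE) ? 0 : 1;
}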