Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The nf_conntrack_core.c fix in 'net' is not relevant in 'net-next'
because we no longer have a per-netns conntrack hash.

The ip_gre.c conflict as well as the iwlwifi ones were cases of
overlapping changes.

Conflicts:
	drivers/net/wireless/intel/iwlwifi/mvm/tx.c
	net/ipv4/ip_gre.c
	net/netfilter/nf_conntrack_core.c

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 909b27f706

144 changed files with 1365 additions and 616 deletions
@@ -1215,6 +1215,41 @@ static void cgroup_destroy_root(struct cgroup_root *root)
 	cgroup_free_root(root);
 }
 
+/*
+ * look up cgroup associated with current task's cgroup namespace on the
+ * specified hierarchy
+ */
+static struct cgroup *
+current_cgns_cgroup_from_root(struct cgroup_root *root)
+{
+	struct cgroup *res = NULL;
+	struct css_set *cset;
+
+	lockdep_assert_held(&css_set_lock);
+
+	rcu_read_lock();
+
+	cset = current->nsproxy->cgroup_ns->root_cset;
+	if (cset == &init_css_set) {
+		res = &root->cgrp;
+	} else {
+		struct cgrp_cset_link *link;
+
+		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
+			struct cgroup *c = link->cgrp;
+
+			if (c->root == root) {
+				res = c;
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	BUG_ON(!res);
+	return res;
+}
+
 /* look up cgroup associated with given css_set on the specified hierarchy */
 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
 					    struct cgroup_root *root)
@@ -1593,6 +1628,33 @@ static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
 	return 0;
 }
 
+static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
+			    struct kernfs_root *kf_root)
+{
+	int len = 0;
+	char *buf = NULL;
+	struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
+	struct cgroup *ns_cgroup;
+
+	buf = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spin_lock_bh(&css_set_lock);
+	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
+	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
+	spin_unlock_bh(&css_set_lock);
+
+	if (len >= PATH_MAX)
+		len = -ERANGE;
+	else if (len > 0) {
+		seq_escape(sf, buf, " \t\n\\");
+		len = 0;
+	}
+	kfree(buf);
+	return len;
+}
+
 static int cgroup_show_options(struct seq_file *seq,
 			       struct kernfs_root *kf_root)
 {
@@ -5433,6 +5495,7 @@ static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
 	.mkdir			= cgroup_mkdir,
 	.rmdir			= cgroup_rmdir,
 	.rename			= cgroup_rename,
+	.show_path		= cgroup_show_path,
 };
 
 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
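For context (commentary, not part of the commit): the new cgroup_show_path callback makes cgroup mount entries in /proc/<pid>/mountinfo come out relative to the reader's cgroup namespace root instead of the full hierarchy path. A minimal, hypothetical userspace sketch that would exercise this on a kernel with cgroup namespaces (Linux 4.6+, needs CAP_SYS_ADMIN) might look like:

	/* Hypothetical illustration only, not part of this diff. */
	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* Enter a new cgroup namespace rooted at this task's current cgroups. */
		if (unshare(CLONE_NEWCGROUP) < 0) {
			perror("unshare(CLONE_NEWCGROUP)");
			return 1;
		}

		/* cgroup mount entries should now be shown relative to the new
		 * namespace root rather than as full hierarchy paths. */
		return system("grep cgroup /proc/self/mountinfo");
	}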
@@ -351,7 +351,7 @@ static struct srcu_struct pmus_srcu;
  * 1 - disallow cpu events for unpriv
  * 2 - disallow kernel profiling for unpriv
  */
-int sysctl_perf_event_paranoid __read_mostly = 1;
+int sysctl_perf_event_paranoid __read_mostly = 2;
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
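For context (commentary, not part of the diff): this sysctl is exposed to userspace at /proc/sys/kernel/perf_event_paranoid, so the stricter default of 2 (no kernel profiling for unprivileged users) is visible there. A minimal, hypothetical sketch that reads the active setting:

	/* Hypothetical example, not part of this commit: print the current
	 * perf_event_paranoid level from procfs. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
		int level;

		if (!f || fscanf(f, "%d", &level) != 1) {
			perror("perf_event_paranoid");
			return 1;
		}
		fclose(f);
		printf("perf_event_paranoid = %d\n", level);	/* 2 also disallows kernel profiling for unpriv */
		return 0;
	}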
@@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 			 bool truncated)
 {
 	struct ring_buffer *rb = handle->rb;
+	bool wakeup = truncated;
 	unsigned long aux_head;
 	u64 flags = 0;
 
@@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
 
 	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
-		perf_output_wakeup(handle);
+		wakeup = true;
 		local_add(rb->aux_watermark, &rb->aux_wakeup);
 	}
+
+	if (wakeup) {
+		if (truncated)
+			handle->event->pending_disable = 1;
+		perf_output_wakeup(handle);
+	}
+
 	handle->event = NULL;
 
 	local_set(&rb->aux_nest, 0);
@@ -1394,6 +1394,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(later_rq->cpu,
 						       &task->cpus_allowed) ||
 				     task_running(rq, task) ||
+				     !dl_task(task) ||
 				     !task_on_rq_queued(task))) {
 			double_unlock_balance(rq, later_rq);
 			later_rq = NULL;
@@ -3030,7 +3030,14 @@ static int idle_balance(struct rq *this_rq);
 
 #else /* CONFIG_SMP */
 
-static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline void update_load_avg(struct sched_entity *se, int not_used)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct rq *rq = rq_of(cfs_rq);
+
+	cpufreq_trigger_update(rq_clock(rq));
+}
+
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
@@ -3181,25 +3188,17 @@ static inline void check_schedstat_required(void)
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-	bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING);
-	bool curr = cfs_rq->curr == se;
-
 	/*
-	 * If we're the current task, we must renormalise before calling
-	 * update_curr().
+	 * Update the normalized vruntime before updating min_vruntime
+	 * through calling update_curr().
 	 */
-	if (renorm && curr)
+	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
 		se->vruntime += cfs_rq->min_vruntime;
 
+	/*
+	 * Update run-time statistics of the 'current'.
+	 */
 	update_curr(cfs_rq);
-
-	/*
-	 * Otherwise, renormalise after, such that we're placed at the current
-	 * moment in time, instead of some random moment in the past.
-	 */
-	if (renorm && !curr)
-		se->vruntime += cfs_rq->min_vruntime;
-
 	enqueue_entity_load_avg(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 	update_cfs_shares(cfs_rq);
@@ -3215,7 +3214,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		update_stats_enqueue(cfs_rq, se);
 		check_spread(cfs_rq, se);
 	}
-	if (!curr)
+	if (se != cfs_rq->curr)
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
 
@@ -1729,6 +1729,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(lowest_rq->cpu,
 						       tsk_cpus_allowed(task)) ||
 				     task_running(rq, task) ||
+				     !rt_task(task) ||
 				     !task_on_rq_queued(task))) {
 
 			double_unlock_balance(rq, lowest_rq);
@@ -4554,6 +4554,17 @@ static void rebind_workers(struct worker_pool *pool)
 						  pool->attrs->cpumask) < 0);
 
 	spin_lock_irq(&pool->lock);
+
+	/*
+	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
+	 * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
+	 * being reworked and this can go away in time.
+	 */
+	if (!(pool->flags & POOL_DISASSOCIATED)) {
+		spin_unlock_irq(&pool->lock);
+		return;
+	}
+
 	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {