cgroup: drop @skip_css from cgroup_taskset_for_each()
If !NULL, @skip_css makes cgroup_taskset_for_each() skip the matching css. The intention of the interface is to make it easy to skip css's (cgroup_subsys_states) which already match the migration target; however, this is entirely unnecessary as the migration taskset doesn't include tasks which are already in the target cgroup. Drop @skip_css from cgroup_taskset_for_each().

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Daniel Borkmann <dborkman@redhat.com>
This commit is contained in:
parent cb0f1fe9ba
commit 924f0d9a20
8 changed files with 11 additions and 15 deletions
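For callers, the change is simply that the skip argument disappears: the loop always visits every task in the taskset. Below is a minimal sketch of a subsystem ->attach() callback written against the new two-argument form; example_attach() and example_move_task() are hypothetical names used only for illustration, while the iteration pattern mirrors the callers updated in the hunks that follow.

/*
 * Minimal sketch of an ->attach() callback using the new two-argument
 * cgroup_taskset_for_each().  example_attach() and example_move_task()
 * are hypothetical; only the loop reflects the interface in this commit.
 */
#include <linux/cgroup.h>
#include <linux/sched.h>

static void example_move_task(struct task_struct *task)
{
	/* per-subsystem migration work for one task would go here */
}

static void example_attach(struct cgroup_subsys_state *css,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/*
	 * Previously this was cgroup_taskset_for_each(task, skip_css, tset),
	 * with skip_css set to a css to exclude or NULL.  The taskset never
	 * contains tasks already in the target cgroup, so the skip argument
	 * is dropped.
	 */
	cgroup_taskset_for_each(task, tset)
		example_move_task(task);
}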
@@ -187,7 +187,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 	 * current state before executing the following - !frozen tasks may
 	 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
 	 */
-	cgroup_taskset_for_each(task, new_css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		if (!(freezer->state & CGROUP_FREEZING)) {
 			__thaw_task(task);
 		} else {
@@ -1398,7 +1398,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
 	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
 		goto out_unlock;
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		/*
 		 * Kthreads which disallow setaffinity shouldn't be moved
 		 * to a new cpuset; we don't want to change their cpu
@@ -1467,7 +1467,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 
 	guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		/*
 		 * can_attach beforehand should guarantee that this doesn't
 		 * fail. TODO: have a better way to handle failure here
@@ -8021,7 +8021,7 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset)
+	cgroup_taskset_for_each(task, tset)
 		task_function_call(task, __perf_cgroup_move, task);
 }
 
@@ -7600,7 +7600,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
 		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
@@ -7618,7 +7618,7 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, css, tset)
+	cgroup_taskset_for_each(task, tset)
 		sched_move_task(task);
 }
 