mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-29 10:01:25 +00:00
cgroup: only need to check oldcgrp==newcgrp once
In cgroup_attach_proc it is now sufficient to only check that oldcgrp==newcgrp once. Now that we are using threadgroup_lock() during the migrations, oldcgrp will not change. Signed-off-by: Mandeep Singh Baines <msb@chromium.org> Acked-by: Li Zefan <lizf@cn.fujitsu.com> Signed-off-by: Tejun Heo <tj@kernel.org> Cc: containers@lists.linux-foundation.org Cc: cgroups@vger.kernel.org Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Paul Menage <paul@paulmenage.org>
This commit is contained in:
parent
b07ef77411
commit
892a2b90ba
1 changed file with 6 additions and 16 deletions
|
@ -2067,7 +2067,7 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
|
||||||
*/
|
*/
|
||||||
int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
|
int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
|
||||||
{
|
{
|
||||||
int retval, i, group_size, nr_migrating_tasks;
|
int retval, i, group_size;
|
||||||
struct cgroup_subsys *ss, *failed_ss = NULL;
|
struct cgroup_subsys *ss, *failed_ss = NULL;
|
||||||
/* guaranteed to be initialized later, but the compiler needs this */
|
/* guaranteed to be initialized later, but the compiler needs this */
|
||||||
struct css_set *oldcg;
|
struct css_set *oldcg;
|
||||||
|
@ -2118,7 +2118,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
|
||||||
}
|
}
|
||||||
|
|
||||||
tsk = leader;
|
tsk = leader;
|
||||||
i = nr_migrating_tasks = 0;
|
i = 0;
|
||||||
do {
|
do {
|
||||||
struct task_and_cgroup ent;
|
struct task_and_cgroup ent;
|
||||||
|
|
||||||
|
@ -2134,11 +2134,12 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
|
||||||
*/
|
*/
|
||||||
ent.task = tsk;
|
ent.task = tsk;
|
||||||
ent.cgrp = task_cgroup_from_root(tsk, root);
|
ent.cgrp = task_cgroup_from_root(tsk, root);
|
||||||
|
/* nothing to do if this task is already in the cgroup */
|
||||||
|
if (ent.cgrp == cgrp)
|
||||||
|
continue;
|
||||||
retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
|
retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
|
||||||
BUG_ON(retval != 0);
|
BUG_ON(retval != 0);
|
||||||
i++;
|
i++;
|
||||||
if (ent.cgrp != cgrp)
|
|
||||||
nr_migrating_tasks++;
|
|
||||||
} while_each_thread(leader, tsk);
|
} while_each_thread(leader, tsk);
|
||||||
/* remember the number of threads in the array for later. */
|
/* remember the number of threads in the array for later. */
|
||||||
group_size = i;
|
group_size = i;
|
||||||
|
@ -2148,7 +2149,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
|
||||||
|
|
||||||
/* methods shouldn't be called if no task is actually migrating */
|
/* methods shouldn't be called if no task is actually migrating */
|
||||||
retval = 0;
|
retval = 0;
|
||||||
if (!nr_migrating_tasks)
|
if (!group_size)
|
||||||
goto out_free_group_list;
|
goto out_free_group_list;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -2171,14 +2172,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
|
||||||
INIT_LIST_HEAD(&newcg_list);
|
INIT_LIST_HEAD(&newcg_list);
|
||||||
for (i = 0; i < group_size; i++) {
|
for (i = 0; i < group_size; i++) {
|
||||||
tc = flex_array_get(group, i);
|
tc = flex_array_get(group, i);
|
||||||
/* nothing to do if this task is already in the cgroup */
|
|
||||||
if (tc->cgrp == cgrp)
|
|
||||||
continue;
|
|
||||||
/*
|
|
||||||
* get old css_set pointer. threadgroup is locked so this is
|
|
||||||
* safe against concurrent cgroup_exit() changing this to
|
|
||||||
* init_css_set.
|
|
||||||
*/
|
|
||||||
oldcg = tc->task->cgroups;
|
oldcg = tc->task->cgroups;
|
||||||
|
|
||||||
/* if we don't already have it in the list get a new one */
|
/* if we don't already have it in the list get a new one */
|
||||||
|
@ -2194,9 +2187,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
|
||||||
*/
|
*/
|
||||||
for (i = 0; i < group_size; i++) {
|
for (i = 0; i < group_size; i++) {
|
||||||
tc = flex_array_get(group, i);
|
tc = flex_array_get(group, i);
|
||||||
/* leave current thread as it is if it's already there */
|
|
||||||
if (tc->cgrp == cgrp)
|
|
||||||
continue;
|
|
||||||
retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
|
retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
|
||||||
BUG_ON(retval);
|
BUG_ON(retval);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue