cgroup: use a per-cgroup work for release agent

Instead of using a single global work item to schedule the release agent
for removable cgroups, switch to a per-cgroup work item, which makes
the code much simpler.

v2: use a dedicated work instead of reusing css->destroy_work. (Tejun)

Signed-off-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
Zefan Li 2014-09-18 16:06:19 +08:00 committed by Tejun Heo
parent 0c8fc2c121
commit 971ff49355
2 changed files with 33 additions and 79 deletions

View file

@ -233,13 +233,6 @@ struct cgroup {
*/ */
struct list_head e_csets[CGROUP_SUBSYS_COUNT]; struct list_head e_csets[CGROUP_SUBSYS_COUNT];
/*
* Linked list running through all cgroups that can
* potentially be reaped by the release agent. Protected by
* release_list_lock
*/
struct list_head release_list;
/* /*
* list of pidlists, up to two for each namespace (one for procs, one * list of pidlists, up to two for each namespace (one for procs, one
* for tasks); created on demand. * for tasks); created on demand.
@ -249,6 +242,9 @@ struct cgroup {
/* used to wait for offlining of csses */ /* used to wait for offlining of csses */
wait_queue_head_t offline_waitq; wait_queue_head_t offline_waitq;
/* used to schedule release agent */
struct work_struct release_agent_work;
}; };
#define MAX_CGROUP_ROOT_NAMELEN 64 #define MAX_CGROUP_ROOT_NAMELEN 64

View file

@ -392,12 +392,7 @@ static int notify_on_release(const struct cgroup *cgrp)
; \ ; \
else else
/* the list of cgroups eligible for automatic release. Protected by
* release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work); static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp); static void check_for_release(struct cgroup *cgrp);
/* /*
@ -1577,7 +1572,6 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
INIT_LIST_HEAD(&cgrp->self.sibling); INIT_LIST_HEAD(&cgrp->self.sibling);
INIT_LIST_HEAD(&cgrp->self.children); INIT_LIST_HEAD(&cgrp->self.children);
INIT_LIST_HEAD(&cgrp->cset_links); INIT_LIST_HEAD(&cgrp->cset_links);
INIT_LIST_HEAD(&cgrp->release_list);
INIT_LIST_HEAD(&cgrp->pidlists); INIT_LIST_HEAD(&cgrp->pidlists);
mutex_init(&cgrp->pidlist_mutex); mutex_init(&cgrp->pidlist_mutex);
cgrp->self.cgroup = cgrp; cgrp->self.cgroup = cgrp;
@ -1587,6 +1581,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
INIT_LIST_HEAD(&cgrp->e_csets[ssid]); INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
init_waitqueue_head(&cgrp->offline_waitq); init_waitqueue_head(&cgrp->offline_waitq);
INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent);
} }
static void init_cgroup_root(struct cgroup_root *root, static void init_cgroup_root(struct cgroup_root *root,
@ -4342,6 +4337,7 @@ static void css_free_work_fn(struct work_struct *work)
/* cgroup free path */ /* cgroup free path */
atomic_dec(&cgrp->root->nr_cgrps); atomic_dec(&cgrp->root->nr_cgrps);
cgroup_pidlist_destroy_all(cgrp); cgroup_pidlist_destroy_all(cgrp);
cancel_work_sync(&cgrp->release_agent_work);
if (cgroup_parent(cgrp)) { if (cgroup_parent(cgrp)) {
/* /*
@ -4804,12 +4800,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
for_each_css(css, ssid, cgrp) for_each_css(css, ssid, cgrp)
kill_css(css); kill_css(css);
/* CSS_ONLINE is clear, remove from ->release_list for the last time */
raw_spin_lock(&release_list_lock);
if (!list_empty(&cgrp->release_list))
list_del_init(&cgrp->release_list);
raw_spin_unlock(&release_list_lock);
/* /*
* Remove @cgrp directory along with the base files. @cgrp has an * Remove @cgrp directory along with the base files. @cgrp has an
* extra ref on its kn. * extra ref on its kn.
@ -5271,25 +5261,9 @@ void cgroup_exit(struct task_struct *tsk)
static void check_for_release(struct cgroup *cgrp) static void check_for_release(struct cgroup *cgrp)
{ {
if (cgroup_is_releasable(cgrp) && list_empty(&cgrp->cset_links) && if (cgroup_is_releasable(cgrp) && !cgroup_has_tasks(cgrp) &&
!css_has_online_children(&cgrp->self)) { !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
/* schedule_work(&cgrp->release_agent_work);
* Control Group is currently removeable. If it's not
* already queued for a userspace notification, queue
* it now
*/
int need_schedule_work = 0;
raw_spin_lock(&release_list_lock);
if (!cgroup_is_dead(cgrp) &&
list_empty(&cgrp->release_list)) {
list_add(&cgrp->release_list, &release_list);
need_schedule_work = 1;
}
raw_spin_unlock(&release_list_lock);
if (need_schedule_work)
schedule_work(&release_agent_work);
}
} }
/* /*
@ -5317,52 +5291,36 @@ static void check_for_release(struct cgroup *cgrp)
*/ */
static void cgroup_release_agent(struct work_struct *work) static void cgroup_release_agent(struct work_struct *work)
{ {
BUG_ON(work != &release_agent_work); struct cgroup *cgrp =
mutex_lock(&cgroup_mutex); container_of(work, struct cgroup, release_agent_work);
raw_spin_lock(&release_list_lock);
while (!list_empty(&release_list)) {
char *argv[3], *envp[3];
int i;
char *pathbuf = NULL, *agentbuf = NULL, *path; char *pathbuf = NULL, *agentbuf = NULL, *path;
struct cgroup *cgrp = list_entry(release_list.next, char *argv[3], *envp[3];
struct cgroup,
release_list); mutex_lock(&cgroup_mutex);
list_del_init(&cgrp->release_list);
raw_spin_unlock(&release_list_lock);
pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
if (!pathbuf) agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
goto continue_free; if (!pathbuf || !agentbuf)
goto out;
path = cgroup_path(cgrp, pathbuf, PATH_MAX); path = cgroup_path(cgrp, pathbuf, PATH_MAX);
if (!path) if (!path)
goto continue_free; goto out;
agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
if (!agentbuf)
goto continue_free;
i = 0; argv[0] = agentbuf;
argv[i++] = agentbuf; argv[1] = path;
argv[i++] = path; argv[2] = NULL;
argv[i] = NULL;
i = 0;
/* minimal command environment */ /* minimal command environment */
envp[i++] = "HOME=/"; envp[0] = "HOME=/";
envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[i] = NULL; envp[2] = NULL;
/* Drop the lock while we invoke the usermode helper,
* since the exec could involve hitting disk and hence
* be a slow process */
mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_mutex);
call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
mutex_lock(&cgroup_mutex); out:
continue_free:
kfree(pathbuf);
kfree(agentbuf); kfree(agentbuf);
raw_spin_lock(&release_list_lock); kfree(pathbuf);
}
raw_spin_unlock(&release_list_lock);
mutex_unlock(&cgroup_mutex);
} }
static int __init cgroup_disable(char *str) static int __init cgroup_disable(char *str)