Merge branch 'for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

Pull cgroup updates from Tejun Heo:

 - Oleg's pids controller accounting update, which gets rid of the RCU
   delay in pids accounting updates (see the sketch after this list)

 - rstat (cgroup hierarchical stat collection mechanism) optimization

 - Doc updates
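
The pids change below renames cgroup_subsys->free() to ->release() and moves
its call site from the final task_struct free to release_task(), as the diffs
further down show.  As a rough, hypothetical illustration only (a made-up
"foo" controller; foo_cgroup, css_foo(), foo_cgrp_id and foo_uncharge() are
invented stand-ins for the pids_* code changed below), a controller with
per-task accounting now hooks the reap path like this:

	/*
	 * Hypothetical controller, for illustration only -- not part of
	 * this series.
	 */
	static void foo_release(struct task_struct *task)
	{
		/*
		 * Runs from release_task() via cgroup_release(), i.e. as
		 * soon as the task is reaped.  The old ->free() hook only
		 * ran once the task_struct itself was freed, an RCU grace
		 * period (plus any remaining references) later.
		 */
		struct foo_cgroup *foo = css_foo(task_css(task, foo_cgrp_id));

		foo_uncharge(foo, 1);
	}

	struct cgroup_subsys foo_cgrp_subsys = {
		/* ... */
		.release	= foo_release,	/* formerly .free */
	};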

* 'for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cpuset: remove unused task_has_mempolicy()
  cgroup, rstat: Don't flush subtree root unless necessary
  cgroup: add documentation for pids.events file
  Documentation: cgroup-v2: eliminate markup warnings
  MAINTAINERS: Update cgroup entry
  cgroup/pids: turn cgroup_subsys->free() into cgroup_subsys->release() to fix the accounting
Linus Torvalds 2019-03-07 10:11:41 -08:00
commit 1fc1cd8399
10 changed files with 28 additions and 29 deletions

Documentation/admin-guide/cgroup-v2.rst

@@ -1519,7 +1519,7 @@ protected workload.
 The limits are only applied at the peer level in the hierarchy. This means that
 in the diagram below, only groups A, B, and C will influence each other, and
-groups D and F will influence each other. Group G will influence nobody.
+groups D and F will influence each other. Group G will influence nobody::
 
 			[root]
 		       /  |   \

Documentation/cgroup-v1/pids.txt

@@ -33,6 +33,9 @@ limit in the hierarchy is followed).
 pids.current tracks all child cgroup hierarchies, so parent/pids.current is a
 superset of parent/child/pids.current.
 
+The pids.events file contains event counters:
+  - max: Number of times fork failed because limit was hit.
+
 Example
 -------

MAINTAINERS

@@ -3970,9 +3970,10 @@ M:	Johannes Weiner <hannes@cmpxchg.org>
 L:	cgroups@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
 S:	Maintained
-F:	Documentation/cgroup*
+F:	Documentation/admin-guide/cgroup-v2.rst
+F:	Documentation/cgroup-v1/
 F:	include/linux/cgroup*
-F:	kernel/cgroup*
+F:	kernel/cgroup/
 
 CONTROL GROUP - CPUSET
 M:	Li Zefan <lizefan@huawei.com>

include/linux/cgroup-defs.h

@@ -606,7 +606,7 @@ struct cgroup_subsys {
 	void (*cancel_fork)(struct task_struct *task);
 	void (*fork)(struct task_struct *task);
 	void (*exit)(struct task_struct *task);
-	void (*free)(struct task_struct *task);
+	void (*release)(struct task_struct *task);
 	void (*bind)(struct cgroup_subsys_state *root_css);
 
 	bool early_init:1;

include/linux/cgroup.h

@@ -121,6 +121,7 @@ extern int cgroup_can_fork(struct task_struct *p);
 extern void cgroup_cancel_fork(struct task_struct *p);
 extern void cgroup_post_fork(struct task_struct *p);
 void cgroup_exit(struct task_struct *p);
+void cgroup_release(struct task_struct *p);
 void cgroup_free(struct task_struct *p);
 
 int cgroup_init_early(void);
@@ -697,6 +698,7 @@ static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
 static inline void cgroup_cancel_fork(struct task_struct *p) {}
 static inline void cgroup_post_fork(struct task_struct *p) {}
 static inline void cgroup_exit(struct task_struct *p) {}
+static inline void cgroup_release(struct task_struct *p) {}
 static inline void cgroup_free(struct task_struct *p) {}
 
 static inline int cgroup_init_early(void) { return 0; }
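
The second hunk adds the usual !CONFIG_CGROUPS stub, so call sites such as
release_task() below can call cgroup_release() unconditionally.  A tiny
standalone illustration of that stub pattern (plain userspace C with an
invented CONFIG_FOO switch -- not kernel code):

	#include <stdio.h>

	#ifdef CONFIG_FOO
	void foo_release(int id)
	{
		printf("feature enabled: releasing %d\n", id);
	}
	#else
	/* feature compiled out: the hook collapses to a no-op */
	static inline void foo_release(int id) { }
	#endif

	int main(void)
	{
		foo_release(42);	/* call site needs no #ifdef of its own */
		return 0;
	}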

kernel/cgroup/cgroup.c

@@ -197,7 +197,7 @@ static u64 css_serial_nr_next = 1;
  */
 static u16 have_fork_callback __read_mostly;
 static u16 have_exit_callback __read_mostly;
-static u16 have_free_callback __read_mostly;
+static u16 have_release_callback __read_mostly;
 static u16 have_canfork_callback __read_mostly;
 
 /* cgroup namespace for init task */
@@ -5326,7 +5326,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
 
 	have_fork_callback |= (bool)ss->fork << ss->id;
 	have_exit_callback |= (bool)ss->exit << ss->id;
-	have_free_callback |= (bool)ss->free << ss->id;
+	have_release_callback |= (bool)ss->release << ss->id;
 	have_canfork_callback |= (bool)ss->can_fork << ss->id;
 
 	/* At system boot, before all subsystems have been
@@ -5762,16 +5762,19 @@ void cgroup_exit(struct task_struct *tsk)
 	} while_each_subsys_mask();
 }
 
-void cgroup_free(struct task_struct *task)
+void cgroup_release(struct task_struct *task)
 {
-	struct css_set *cset = task_css_set(task);
 	struct cgroup_subsys *ss;
 	int ssid;
 
-	do_each_subsys_mask(ss, ssid, have_free_callback) {
-		ss->free(task);
+	do_each_subsys_mask(ss, ssid, have_release_callback) {
+		ss->release(task);
 	} while_each_subsys_mask();
+}
 
+void cgroup_free(struct task_struct *task)
+{
+	struct css_set *cset = task_css_set(task);
 	put_css_set(cset);
 }
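
The have_release_callback mask above follows the same pattern as the other
have_*_callback masks: each subsystem that implements the hook contributes
one bit keyed by its ->id, so cgroup_release() only walks subsystems that
actually define ->release().  A standalone, runnable illustration of that
bitmask-of-callbacks pattern (plain userspace C with invented names -- not
kernel code):

	#include <stdint.h>
	#include <stdio.h>

	struct subsys {
		const char *name;
		void (*release)(int pid);	/* optional hook */
	};

	static void pids_like_release(int pid)
	{
		printf("uncharging one pid for task %d\n", pid);
	}

	static struct subsys subsystems[] = {
		{ "cpuset", NULL },			/* no hook */
		{ "pids",   pids_like_release },	/* has a hook */
	};

	static uint16_t have_release_mask;

	int main(void)
	{
		int id;

		/* registration: mirrors have_release_callback |= (bool)ss->release << ss->id */
		for (id = 0; id < 2; id++)
			have_release_mask |= (uint16_t)(subsystems[id].release != NULL) << id;

		/* exit path: visit only subsystems whose bit is set */
		for (id = 0; id < 2; id++)
			if (have_release_mask & (1u << id))
				subsystems[id].release(1234);

		return 0;
	}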

kernel/cgroup/cpuset.c

@@ -203,19 +203,6 @@ static inline struct cpuset *parent_cs(struct cpuset *cs)
 	return css_cs(cs->css.parent);
 }
 
-#ifdef CONFIG_NUMA
-static inline bool task_has_mempolicy(struct task_struct *task)
-{
-	return task->mempolicy;
-}
-#else
-static inline bool task_has_mempolicy(struct task_struct *task)
-{
-	return false;
-}
-#endif
-
 /* bits in struct cpuset flags field */
 typedef enum {
 	CS_ONLINE,

kernel/cgroup/pids.c

@@ -247,7 +247,7 @@ static void pids_cancel_fork(struct task_struct *task)
 	pids_uncharge(pids, 1);
 }
 
-static void pids_free(struct task_struct *task)
+static void pids_release(struct task_struct *task)
 {
 	struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
 
@@ -342,7 +342,7 @@ struct cgroup_subsys pids_cgrp_subsys = {
 	.cancel_attach	= pids_cancel_attach,
 	.can_fork	= pids_can_fork,
 	.cancel_fork	= pids_cancel_fork,
-	.free		= pids_free,
+	.release	= pids_release,
 	.legacy_cftypes	= pids_files,
 	.dfl_cftypes	= pids_files,
 	.threaded	= true,

kernel/cgroup/rstat.c

@@ -87,7 +87,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 						   struct cgroup *root, int cpu)
 {
 	struct cgroup_rstat_cpu *rstatc;
-	struct cgroup *parent;
 
 	if (pos == root)
 		return NULL;
@@ -115,8 +114,8 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 	 * However, due to the way we traverse, @pos will be the first
 	 * child in most cases. The only exception is @root.
 	 */
-	parent = cgroup_parent(pos);
-	if (parent && rstatc->updated_next) {
+	if (rstatc->updated_next) {
+		struct cgroup *parent = cgroup_parent(pos);
 		struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
 		struct cgroup_rstat_cpu *nrstatc;
 		struct cgroup **nextp;
@@ -140,11 +139,14 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 		 * updated stat.
 		 */
 		smp_mb();
+
+		return pos;
 	}
 
-	return pos;
+	/* only happens for @root */
+	return NULL;
 }
 
 /* see cgroup_rstat_flush() */
 static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
 	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
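
For context, the function changed above feeds the per-cpu flush loop in
cgroup_rstat_flush_locked() in this same file.  A simplified sketch of that
consumer (locking and the per-css flush callbacks trimmed; an approximation,
not the verbatim loop) shows what the new NULL return buys -- a subtree root
that was never marked updated is no longer handed back, so it is no longer
flushed on every call:

	for_each_possible_cpu(cpu) {
		struct cgroup *pos = NULL;

		/*
		 * Each iteration pops one cgroup with pending per-cpu
		 * updates.  Previously the walk always ended by returning
		 * @cgrp itself; now @cgrp comes back only if it was
		 * actually linked on this CPU's updated list.
		 */
		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
			cgroup_base_stat_flush(pos, cpu);
	}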

kernel/exit.c

@@ -219,6 +219,7 @@ repeat:
 	}
 
 	write_unlock_irq(&tasklist_lock);
+	cgroup_release(p);
 	release_thread(p);
 	call_rcu(&p->rcu, delayed_put_task_struct);
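
Taken together with the cgroup.c hunk, this gives a dead task two distinct
teardown points.  A rough, simplified map of the two paths (intermediate
helpers elided):

	/*
	 * release_task(p)                    reaping, happens right away
	 *   -> cgroup_release(p)             ss->release() runs here, so
	 *                                    e.g. pids.current drops now
	 *   -> call_rcu(&p->rcu, delayed_put_task_struct)
	 *
	 * delayed_put_task_struct()          after an RCU grace period
	 *   -> put_task_struct(p)
	 *     -> __put_task_struct(p)        once the last reference drops
	 *       -> cgroup_free(p)            now only puts the css_set
	 *
	 * Before this series ss->free() (and with it the pids uncharge) sat
	 * on the second, delayed path -- the "rcu delay in pids accounting"
	 * the pull message refers to.
	 */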