mm: simplify lock_page_memcg()

Now that migration doesn't clear page->mem_cgroup of live pages anymore,
it's safe to make lock_page_memcg() and the memcg stat functions take
pages, and spare the callers from memcg objects.

[akpm@linux-foundation.org: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 62cccb8c8e (parent 6a93ca8fde)
Johannes Weiner, 2016-03-15 14:57:22 -07:00; committed by Linus Torvalds
12 changed files with 88 additions and 117 deletions
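Before the diff, a minimal before/after sketch of a typical call site may help. It is modeled on the locking pattern documented in the kerneldoc below; the TestClearPageDirty()/MEM_CGROUP_STAT_DIRTY site is illustrative, not a hunk from this patch:

	/* old API: the caller had to carry the returned memcg handle around */
	struct mem_cgroup *memcg;

	memcg = lock_page_memcg(page);
	if (TestClearPageDirty(page))
		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
	unlock_page_memcg(memcg);

	/*
	 * new API: the page itself is the only handle the caller needs;
	 * lock_page_memcg() keeps page->mem_cgroup stable underneath
	 */
	lock_page_memcg(page);
	if (TestClearPageDirty(page))
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
	unlock_page_memcg(page);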

include/linux/memcontrol.h

@@ -455,42 +455,42 @@ bool mem_cgroup_oom_synchronize(bool wait);
 extern int do_swap_account;
 #endif
 
-struct mem_cgroup *lock_page_memcg(struct page *page);
-void unlock_page_memcg(struct mem_cgroup *memcg);
+void lock_page_memcg(struct page *page);
+void unlock_page_memcg(struct page *page);
 
 /**
  * mem_cgroup_update_page_stat - update page state statistics
- * @memcg: memcg to account against
+ * @page: the page
  * @idx: page state item to account
  * @val: number of pages (positive or negative)
  *
  * Callers must use lock_page_memcg() to prevent double accounting
  * when the page is concurrently being moved to another memcg:
  *
- *   memcg = lock_page_memcg(page);
+ *   lock_page_memcg(page);
  *   if (TestClearPageState(page))
- *     mem_cgroup_update_page_stat(memcg, state, -1);
- *   unlock_page_memcg(memcg);
+ *     mem_cgroup_update_page_stat(page, state, -1);
+ *   unlock_page_memcg(page);
  */
-static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_update_page_stat(struct page *page,
 				enum mem_cgroup_stat_index idx, int val)
 {
 	VM_BUG_ON(!rcu_read_lock_held());
 
-	if (memcg)
-		this_cpu_add(memcg->stat->count[idx], val);
+	if (page->mem_cgroup)
+		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
 }
 
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(memcg, idx, 1);
+	mem_cgroup_update_page_stat(page, idx, 1);
 }
 
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
-	mem_cgroup_update_page_stat(memcg, idx, -1);
+	mem_cgroup_update_page_stat(page, idx, -1);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -661,12 +661,11 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+static inline void lock_page_memcg(struct page *page)
 {
-	return NULL;
 }
 
-static inline void unlock_page_memcg(struct mem_cgroup *memcg)
+static inline void unlock_page_memcg(struct page *page)
 {
 }
@@ -692,12 +691,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
 	return false;
 }
 
-static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
 }
 
-static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+static inline void mem_cgroup_dec_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
 }
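The same simplification plays out in the other changed files. As one example, here is a sketch of how a mapped-file accounting site such as mm/rmap.c's page_add_file_rmap() reads after this change; it is modeled on that function as of this series, not quoted verbatim from the patch:

void page_add_file_rmap(struct page *page)
{
	/* pin page->mem_cgroup so the stat lands in the right group */
	lock_page_memcg(page);
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
	}
	unlock_page_memcg(page);
}

Because the !CONFIG_MEMCG stubs above compile to empty inlines, call sites like this need no #ifdef guards.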