mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-23 07:12:09 +00:00
mm: memcontrol: use page lists for uncharge batching
Pages are now uncharged at release time, and all sources of batched uncharges operate on lists of pages. Directly use those lists, and get rid of the per-task batching state. This also batches statistics accounting, in addition to the res counter charges, to reduce IRQ-disabling and re-enabling. Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Michal Hocko <mhocko@suse.cz> Cc: Hugh Dickins <hughd@google.com> Cc: Tejun Heo <tj@kernel.org> Cc: Vladimir Davydov <vdavydov@parallels.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Vladimir Davydov <vdavydov@parallels.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
0a31bc97c8
commit
747db954ca
6 changed files with 117 additions and 129 deletions
@@ -59,12 +59,8 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 			      bool lrucare);
 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
 
 void mem_cgroup_uncharge(struct page *page);
-
-/* Batched uncharging */
-void mem_cgroup_uncharge_start(void);
-void mem_cgroup_uncharge_end(void);
+void mem_cgroup_uncharge_list(struct list_head *page_list);
 
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 			bool lrucare);
|
@@ -233,11 +229,7 @@ static inline void mem_cgroup_uncharge(struct page *page)
 {
 }
 
-static inline void mem_cgroup_uncharge_start(void)
-{
-}
-
-static inline void mem_cgroup_uncharge_end(void)
+static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
 {
 }
Loading…
Add table
Add a link
Reference in a new issue