mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-23 07:01:23 +00:00
memcg: allocate all page_cgroup at boot
Allocate all page_cgroup at boot and remove the page_cgroup pointer from struct page. This patch adds an interface as struct page_cgroup *lookup_page_cgroup(struct page*) All of FLATMEM/DISCONTIGMEM/SPARSEMEM and MEMORY_HOTPLUG are supported. Removing the page_cgroup pointer reduces the amount of memory by - 4 bytes per PAGE_SIZE. - 8 bytes per PAGE_SIZE if the memory controller is disabled. (even if configured.) On a usual 8GB x86-32 server, this saves 8MB of NORMAL_ZONE memory. On my x86-64 server with 48GB of memory, this saves 96MB of memory. I think this reduction makes sense. By pre-allocation, kmalloc/kfree in charge/uncharge are removed. This means - we no longer need to be afraid of kmalloc failure. (this can happen because of gfp_mask type.) - we can avoid calling kmalloc/kfree. - we can avoid allocating tons of small objects which can be fragmented. - we can know what amount of memory will be used for this extra-lru handling. I added the printk messages "allocated %ld bytes of page_cgroup" "please try cgroup_disable=memory option if you don't want" which should be informative enough for users. Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Reviewed-by: Balbir Singh <balbir@linux.vnet.ibm.com> Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
c05555b572
commit
52d4b9ac0b
8 changed files with 438 additions and 194 deletions
103
include/linux/page_cgroup.h
Normal file
103
include/linux/page_cgroup.h
Normal file
|
@ -0,0 +1,103 @@
|
|||
#ifndef __LINUX_PAGE_CGROUP_H
|
||||
#define __LINUX_PAGE_CGROUP_H
|
||||
|
||||
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
|
||||
#include <linux/bit_spinlock.h>
|
||||
/*
 * Page Cgroup can be considered as an extended mem_map.
 * A page_cgroup structure is associated with every page descriptor; the
 * page_cgroup helps us identify which cgroup a page belongs to.
 * All page cgroups are allocated at boot or at memory hotplug events,
 * so the page cgroup for a pfn always exists.
 */
struct page_cgroup {
	unsigned long flags;		/* PCG_* bits, see enum below; bit 0 doubles as a bit-spinlock */
	struct mem_cgroup *mem_cgroup;	/* cgroup this page is charged to */
	struct page *page;		/* the page descriptor this entry shadows */
	struct list_head lru;		/* per cgroup LRU list */
};
|
||||
|
||||
/* Per-node early setup of page_cgroup bookkeeping, called during boot. */
void __init pgdat_page_cgroup_init(struct pglist_data *pgdat);

/* Allocate page_cgroup entries for all boot-time memory (see commit log). */
void __init page_cgroup_init(void);

/* Map a struct page to its pre-allocated page_cgroup entry. */
struct page_cgroup *lookup_page_cgroup(struct page *page);
|
||||
|
||||
/*
 * Bit numbers within page_cgroup->flags. Do not reorder: PCG_LOCK is
 * used as a bit-spinlock via lock_page_cgroup() below.
 */
enum {
	/* flags for mem_cgroup */
	PCG_LOCK,  /* page cgroup is locked */
	PCG_CACHE, /* charged as cache */
	PCG_USED, /* this object is in use. */
	/* flags for LRU placement */
	PCG_ACTIVE, /* page is active in this cgroup */
	PCG_FILE, /* page is file system backed */
	PCG_UNEVICTABLE, /* page is unevictable */
};
|
||||
|
||||
/*
 * Generate test/set/clear accessors for a PCG_* flag, mirroring the
 * PageFoo()/SetPageFoo()/ClearPageFoo() naming convention used for
 * page->flags. uname is the CamelCase suffix, lname the PCG_ suffix.
 */
#define TESTPCGFLAG(uname, lname)			\
static inline int PageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname)			\
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
	{ set_bit(PCG_##lname, &pc->flags); }

#define CLEARPCGFLAG(uname, lname)			\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ clear_bit(PCG_##lname, &pc->flags); }
|
||||
|
||||
/*
 * Instantiate the flag accessors, e.g. PageCgroupCache(pc),
 * SetPageCgroupActive(pc), ClearPageCgroupUsed(pc), ...
 */

/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)

/*
 * NOTE(review): USED has no SETPCGFLAG here — presumably it is set
 * open-coded by the charge path; confirm against mm/memcontrol.c.
 */
TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)

/* LRU management flags (from global-lru definition) */
TESTPCGFLAG(File, FILE)
SETPCGFLAG(File, FILE)
CLEARPCGFLAG(File, FILE)

TESTPCGFLAG(Active, ACTIVE)
SETPCGFLAG(Active, ACTIVE)
CLEARPCGFLAG(Active, ACTIVE)

TESTPCGFLAG(Unevictable, UNEVICTABLE)
SETPCGFLAG(Unevictable, UNEVICTABLE)
CLEARPCGFLAG(Unevictable, UNEVICTABLE)
|
||||
|
||||
/* NUMA node id of the page this page_cgroup shadows. */
static inline int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}
|
||||
|
||||
/* Zone index (within its node) of the page this page_cgroup shadows. */
static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}
|
||||
|
||||
/*
 * Serialize updates to this page_cgroup: the PCG_LOCK bit of pc->flags
 * is used as a bit-spinlock, so no separate lock word is needed.
 */
static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_lock(PCG_LOCK, &pc->flags);
}
|
||||
|
||||
/*
 * Non-blocking variant of lock_page_cgroup(); returns nonzero if the
 * lock was acquired.
 */
static inline int trylock_page_cgroup(struct page_cgroup *pc)
{
	return bit_spin_trylock(PCG_LOCK, &pc->flags);
}
|
||||
|
||||
/* Release the PCG_LOCK bit-spinlock taken by lock_page_cgroup(). */
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}
|
||||
|
||||
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
|
||||
struct page_cgroup;
|
||||
|
||||
static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat)
|
||||
{
|
||||
}
|
||||
|
||||
static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
#endif
|
Loading…
Add table
Add a link
Reference in a new issue