mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-06-17 20:25:19 +00:00
mm: tail page refcounting optimization for slab and hugetlbfs
This skips the _mapcount mangling for slab and hugetlbfs pages. The main trouble in doing this is to guarantee that PageSlab and PageHeadHuge remains constant for all get_page/put_page run on the tail of slab or hugetlbfs compound pages. Otherwise if they're set during get_page but not set during put_page, the _mapcount of the tail page would underflow. PageHeadHuge will remain true until the compound page is released and enters the buddy allocator so it won't risk to change even if the tail page is the last reference left on the page. PG_slab instead is cleared before the slab frees the head page with put_page, so if the tail pin is released after the slab freed the page, we would have a problem. But in the slab case the tail pin cannot be the last reference left on the page. This is because the slab code is free to reuse the compound page after a kfree/kmem_cache_free without having to check if there's any tail pin left. In turn all tail pins must be always released while the head is still pinned by the slab code and so we know PG_slab will be still set too. Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Reviewed-by: Khalid Aziz <khalid.aziz@oracle.com> Cc: Pravin Shelar <pshelar@nicira.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Ben Hutchings <bhutchings@solarflare.com> Cc: Christoph Lameter <cl@linux.com> Cc: Johannes Weiner <jweiner@redhat.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Rik van Riel <riel@redhat.com> Cc: Andi Kleen <andi@firstfloor.org> Cc: Minchan Kim <minchan@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
ca641514f4
commit
44518d2b32
4 changed files with 60 additions and 14 deletions
|
@@ -414,15 +414,45 @@ static inline int page_count(struct page *page)
|
|||
return atomic_read(&compound_head(page)->_count);
|
||||
}
|
||||
|
||||
/*
 * PageHeadHuge() reports whether a head page is a hugetlbfs compound
 * page.  With CONFIG_HUGETLB_PAGE the implementation is provided
 * out of line elsewhere; without hugetlbfs support no page can ever be
 * a huge head page, so the answer is a constant "no".
 */
#ifdef CONFIG_HUGETLB_PAGE
extern int PageHeadHuge(struct page *page_head);
#else /* CONFIG_HUGETLB_PAGE */
static inline int PageHeadHuge(struct page *page_head)
{
	/* hugetlbfs not compiled in: nothing can be a huge head page */
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */
|
||||
|
||||
/*
 * Unchecked helper: tells whether tail pins on this compound page are
 * tracked in the tail pages' _mapcount.  Slab and hugetlbfs compound
 * pages manage their own lifetimes, so their tails are not refcounted.
 * Callers wanting the head-page sanity check use
 * compound_tail_refcounted() instead.
 */
static inline bool __compound_tail_refcounted(struct page *page)
{
	if (PageSlab(page))
		return false;
	return !PageHeadHuge(page);
}
|
||||
|
||||
/*
 * Given a compound head page, report whether its tail pages are
 * reference counted (i.e. whether get_page/put_page on a tail must
 * also adjust the tail's _mapcount).
 *
 * This is only safe because PageSlab and PageHeadHuge are guaranteed
 * to stay true, on any page for which they return true here, until
 * every tail pin has been released.
 */
static inline bool compound_tail_refcounted(struct page *head)
{
	/* the caller must hand us the head page, never a tail */
	VM_BUG_ON(!PageHead(head));
	return __compound_tail_refcounted(head);
}
|
||||
|
||||
static inline void get_huge_page_tail(struct page *page)
|
||||
{
|
||||
/*
|
||||
* __split_huge_page_refcount() cannot run
|
||||
* from under us.
|
||||
* In turn no need of compound_trans_head here.
|
||||
*/
|
||||
VM_BUG_ON(page_mapcount(page) < 0);
|
||||
VM_BUG_ON(atomic_read(&page->_count) != 0);
|
||||
atomic_inc(&page->_mapcount);
|
||||
if (compound_tail_refcounted(compound_head(page)))
|
||||
atomic_inc(&page->_mapcount);
|
||||
}
|
||||
|
||||
extern bool __get_page_tail(struct page *page);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue