smaps: break out smaps_pte_entry() from smaps_pte_range()
We will use smaps_pte_entry() in a moment to handle both small and
transparent large pages.  But, we must break it out of smaps_pte_range()
first.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Eric B Munson <emunson@mgebm.net>
Tested-by: Eric B Munson <emunson@mgebm.net>
Cc: Michael J Wolf <mjwolf@us.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 033193275b
commit ae11c4d9f6

1 changed file with 47 additions and 40 deletions
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -333,33 +333,26 @@ struct mem_size_stats {
 	u64 pss;
 };
 
-static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+
+static void smaps_pte_entry(pte_t ptent, unsigned long addr,
 		struct mm_walk *walk)
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = mss->vma;
-	pte_t *pte, ptent;
-	spinlock_t *ptl;
 	struct page *page;
 	int mapcount;
 
-	split_huge_page_pmd(walk->mm, pmd);
-
-	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (; addr != end; pte++, addr += PAGE_SIZE) {
-		ptent = *pte;
-
 	if (is_swap_pte(ptent)) {
 		mss->swap += PAGE_SIZE;
-		continue;
+		return;
 	}
 
 	if (!pte_present(ptent))
-		continue;
+		return;
 
 	page = vm_normal_page(vma, addr, ptent);
 	if (!page)
-		continue;
+		return;
 
 	if (PageAnon(page))
 		mss->anonymous += PAGE_SIZE;
@@ -382,7 +375,21 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		mss->private_clean += PAGE_SIZE;
 		mss->pss += (PAGE_SIZE << PSS_SHIFT);
 	}
 }
+
+static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+			   struct mm_walk *walk)
+{
+	struct mem_size_stats *mss = walk->private;
+	struct vm_area_struct *vma = mss->vma;
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	split_huge_page_pmd(walk->mm, pmd);
+
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	for (; addr != end; pte++, addr += PAGE_SIZE)
+		smaps_pte_entry(*pte, addr, walk);
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
 	return 0;
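
As the commit message notes, the point of the split is that the per-PTE
accounting can soon be reused for transparent large pages. A minimal sketch of
that direction, assuming a hypothetical ptent_size parameter (this commit
itself adds no such argument; the follow-up patch in this series threads a
size through in essentially this form):

/*
 * Sketch only, not part of this commit: how the broken-out helper
 * generalizes once it takes an entry size.  "ptent_size" is an assumed
 * parameter name; every PAGE_SIZE in the helper above becomes ptent_size.
 */
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	struct page *page;

	if (is_swap_pte(ptent)) {
		mss->swap += ptent_size;	/* was: PAGE_SIZE */
		return;
	}
	if (!pte_present(ptent))
		return;
	page = vm_normal_page(vma, addr, ptent);
	if (!page)
		return;
	if (PageAnon(page))
		mss->anonymous += ptent_size;	/* was: PAGE_SIZE */
	/* ... remaining accounting as above, with PAGE_SIZE -> ptent_size ... */
}

With such a parameter, the loop in smaps_pte_range() would call
smaps_pte_entry(*pte, addr, PAGE_SIZE, walk), leaving small-page accounting
unchanged, while a huge-page handler could pass the huge page size instead.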