mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-06-17 20:25:19 +00:00
x86: PAT: add follow_pfnmap_pte routine to help tracking pfnmap pages - v3
Impact: New, currently unused interface. Add a generic interface to follow a pfn in a pfnmap vma range. This is used by one of the subsequent x86 PAT related patches to keep track of memory types for vma regions across vma copy and free. Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: H. Peter Anvin <hpa@zytor.com>
This commit is contained in:
parent
3c8bb73ace
commit
e121e41844
2 changed files with 46 additions and 0 deletions
|
@ -1223,6 +1223,9 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
|
||||||
#define FOLL_GET 0x04 /* do get_page on page */
|
#define FOLL_GET 0x04 /* do get_page on page */
|
||||||
#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
|
#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
|
||||||
|
|
||||||
|
int follow_pfnmap_pte(struct vm_area_struct *vma,
|
||||||
|
unsigned long address, pte_t *ret_ptep);
|
||||||
|
|
||||||
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
|
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
|
||||||
void *data);
|
void *data);
|
||||||
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
|
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
|
||||||
|
|
43
mm/memory.c
43
mm/memory.c
|
@ -1111,6 +1111,49 @@ no_page_table:
|
||||||
return page;
|
return page;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
|
||||||
|
pte_t *ret_ptep)
|
||||||
|
{
|
||||||
|
pgd_t *pgd;
|
||||||
|
pud_t *pud;
|
||||||
|
pmd_t *pmd;
|
||||||
|
pte_t *ptep, pte;
|
||||||
|
spinlock_t *ptl;
|
||||||
|
struct page *page;
|
||||||
|
struct mm_struct *mm = vma->vm_mm;
|
||||||
|
|
||||||
|
if (!is_pfn_mapping(vma))
|
||||||
|
goto err;
|
||||||
|
|
||||||
|
page = NULL;
|
||||||
|
pgd = pgd_offset(mm, address);
|
||||||
|
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
|
||||||
|
goto err;
|
||||||
|
|
||||||
|
pud = pud_offset(pgd, address);
|
||||||
|
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
|
||||||
|
goto err;
|
||||||
|
|
||||||
|
pmd = pmd_offset(pud, address);
|
||||||
|
if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
|
||||||
|
goto err;
|
||||||
|
|
||||||
|
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
|
||||||
|
|
||||||
|
pte = *ptep;
|
||||||
|
if (!pte_present(pte))
|
||||||
|
goto err_unlock;
|
||||||
|
|
||||||
|
*ret_ptep = pte;
|
||||||
|
pte_unmap_unlock(ptep, ptl);
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
err_unlock:
|
||||||
|
pte_unmap_unlock(ptep, ptl);
|
||||||
|
err:
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
/* Can we do the FOLL_ANON optimization? */
|
/* Can we do the FOLL_ANON optimization? */
|
||||||
static inline int use_zero_page(struct vm_area_struct *vma)
|
static inline int use_zero_page(struct vm_area_struct *vma)
|
||||||
{
|
{
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue