x86: PAT: remove follow_pfnmap_pte in favor of follow_phys
Impact: cleanup; removes a new function in favor of a recently modified older one.

Replace follow_pfnmap_pte in the PAT code with follow_phys. follow_phys also returns the protection, which eliminates the pte_pgprot call, and using it likewise removes the need for pte_pa.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent d87fe6607c
commit 982d789ab7

4 changed files with 11 additions and 70 deletions
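A condensed before/after sketch of the caller-side change (assembled from the PAT hunks below; the variable declarations are added here for readability and are not verbatim from either tree):

        /* Old pattern, removed by this commit: three helpers per page. */
        pte_t pte;
        pgprot_t prot;
        u64 paddr;

        if (follow_pfnmap_pte(vma, vma_start + i, &pte))
                continue;
        paddr  = pte_pa(pte);           /* physical address from the pte */
        prot   = pte_pgprot(pte);       /* protection bits from the pte  */
        retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);

        /*
         * New pattern: follow_phys() hands back both values in one call.
         * prot becomes a plain unsigned long and is wrapped with
         * __pgprot() when passed on to reserve_pfn_range().
         */
        unsigned long prot;
        u64 paddr;

        if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                continue;
        retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));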
@@ -230,11 +230,6 @@ static inline unsigned long pte_pfn(pte_t pte)
         return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }

-static inline u64 pte_pa(pte_t pte)
-{
-        return pte_val(pte) & PTE_PFN_MASK;
-}
-
 #define pte_page(pte)   pfn_to_page(pte_pfn(pte))

 static inline int pmd_large(pmd_t pte)
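As the two definitions in this hunk show, pte_pa() carries no information beyond what pte_pfn() already exposes; the equivalence below (derived from those two definitions, not from the patch itself) is why the helper can be dropped once its user elsewhere in this commit, the PAT code, switches to follow_phys():

        /*
         * pte_pa(pte) == pte_val(pte) & PTE_PFN_MASK, i.e. the pfn from
         * pte_pfn() shifted back up into a physical address:
         */
        u64 paddr = (u64)pte_pfn(pte) << PAGE_SHIFT;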
@@ -685,8 +685,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
         int retval = 0;
         unsigned long i, j;
         u64 paddr;
-        pgprot_t prot;
-        pte_t pte;
+        unsigned long prot;
         unsigned long vma_start = vma->vm_start;
         unsigned long vma_end = vma->vm_end;
         unsigned long vma_size = vma_end - vma_start;
@@ -696,26 +695,22 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)

         if (is_linear_pfn_mapping(vma)) {
                 /*
-                 * reserve the whole chunk starting from vm_pgoff,
-                 * But, we have to get the protection from pte.
+                 * reserve the whole chunk covered by vma. We need the
+                 * starting address and protection from pte.
                  */
-                if (follow_pfnmap_pte(vma, vma_start, &pte)) {
+                if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
                         WARN_ON_ONCE(1);
-                        return -1;
+                        return -EINVAL;
                 }
-                prot = pte_pgprot(pte);
-                paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
-                return reserve_pfn_range(paddr, vma_size, prot);
+                return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
         }

         /* reserve entire vma page by page, using pfn and prot from pte */
         for (i = 0; i < vma_size; i += PAGE_SIZE) {
-                if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+                if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                         continue;

-                paddr = pte_pa(pte);
-                prot = pte_pgprot(pte);
-                retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+                retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
                 if (retval)
                         goto cleanup_ret;
         }
@@ -724,10 +719,9 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 cleanup_ret:
         /* Reserve error: Cleanup partial reservation and return error */
         for (j = 0; j < i; j += PAGE_SIZE) {
-                if (follow_pfnmap_pte(vma, vma_start + j, &pte))
+                if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
                         continue;

-                paddr = pte_pa(pte);
                 free_pfn_range(paddr, PAGE_SIZE);
         }

@@ -797,6 +791,7 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 {
         unsigned long i;
         u64 paddr;
+        unsigned long prot;
         unsigned long vma_start = vma->vm_start;
         unsigned long vma_end = vma->vm_end;
         unsigned long vma_size = vma_end - vma_start;
@@ -821,12 +816,9 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
         } else {
                 /* free entire vma, page by page, using the pfn from pte */
                 for (i = 0; i < vma_size; i += PAGE_SIZE) {
-                        pte_t pte;
-
-                        if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+                        if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                                 continue;

-                        paddr = pte_pa(pte);
                         free_pfn_range(paddr, PAGE_SIZE);
                 }
         }
@@ -1239,9 +1239,6 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_GET   0x04   /* do get_page on page */
 #define FOLL_ANON  0x08   /* give ZERO_PAGE if no pgtable */

-int follow_pfnmap_pte(struct vm_area_struct *vma,
-                        unsigned long address, pte_t *ret_ptep);
-
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                         void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
mm/memory.c (43 changed lines)
@@ -1168,49 +1168,6 @@ no_page_table:
         return page;
 }

-int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
-                        pte_t *ret_ptep)
-{
-        pgd_t *pgd;
-        pud_t *pud;
-        pmd_t *pmd;
-        pte_t *ptep, pte;
-        spinlock_t *ptl;
-        struct page *page;
-        struct mm_struct *mm = vma->vm_mm;
-
-        if (!is_pfn_mapping(vma))
-                goto err;
-
-        page = NULL;
-        pgd = pgd_offset(mm, address);
-        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-                goto err;
-
-        pud = pud_offset(pgd, address);
-        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-                goto err;
-
-        pmd = pmd_offset(pud, address);
-        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-                goto err;
-
-        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-
-        pte = *ptep;
-        if (!pte_present(pte))
-                goto err_unlock;
-
-        *ret_ptep = pte;
-        pte_unmap_unlock(ptep, ptl);
-        return 0;
-
-err_unlock:
-        pte_unmap_unlock(ptep, ptl);
-err:
-        return -EINVAL;
-}
-
 /* Can we do the FOLL_ANON optimization? */
 static inline int use_zero_page(struct vm_area_struct *vma)
 {
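The walker removed above is replaced by follow_phys(), which was extended for this purpose by the parent commit (d87fe6607c) and is therefore not shown in this diff. A sketch of the contract the new PAT callers rely on, inferred from the call sites in the hunks above rather than copied from that commit:

        /*
         * Assumed interface; the authoritative prototype lives in the
         * parent commit.  The u64 pointer type below is inferred from the
         * `u64 paddr` variables the PAT callers pass in.  Returns 0 on
         * success and non-zero when @address has no valid pte in a
         * pfn-mapped VMA (the linear-mapping caller turns that into
         * -EINVAL).  On success, *prot holds the pte's protection bits
         * (fed back through __pgprot()) and *phys the physical address.
         */
        int follow_phys(struct vm_area_struct *vma,  /* pfn-mapped VMA           */
                        unsigned long address,       /* user virtual address     */
                        unsigned int flags,          /* passed as 0 by these callers */
                        unsigned long *prot,         /* out: protection bits     */
                        u64 *phys);                  /* out: physical address    */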