mm: add vm_insert_mixed
vm_insert_mixed will insert either a raw pfn or a refcounted struct page into the page tables, depending on whether vm_normal_page() will return the page or not. With the introduction of the new pte bit, this is now too tricky for drivers to be doing themselves.

filemap_xip uses this in a subsequent patch.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Jared Hulbert <jaredeh@gmail.com>
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent: 7e675137a8
commit: 423bad6004

2 changed files with 64 additions and 28 deletions
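As context before the diffs: the intended caller is a driver (or filemap_xip, per the message) that resolves a fault to a pfn and installs it directly, letting vm_insert_mixed() decide between a refcounted struct page and a raw pfn mapping. Below is a minimal sketch against the ->fault API of this kernel era; mydev_fault and mydev_get_pfn are hypothetical names, not part of this commit.

static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        /* mydev_get_pfn() is an assumed driver-specific offset-to-pfn lookup */
        unsigned long pfn = mydev_get_pfn(vma, vmf->pgoff);
        int err;

        /*
         * vm_insert_mixed() decides per pfn: where vm_normal_page() would
         * later return a struct page for this mapping, the page goes in
         * refcounted; otherwise a raw pfn pte is set (see the mm/memory.c
         * hunks below for the exact rule).
         */
        err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        if (err < 0 && err != -EBUSY)   /* -EBUSY: raced, pte already present */
                return VM_FAULT_SIGBUS;

        return VM_FAULT_NOPAGE; /* pte installed; no page handed to core mm */
}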
include/linux/mm.h

@@ -1152,6 +1152,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn);
 
 struct page *follow_page(struct vm_area_struct *, unsigned long address,
                        unsigned int foll_flags);
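vm_insert_mixed() in the hunks below BUG_ONs unless the vma is marked VM_MIXEDMAP, so a caller has to set the flag when the mapping is created, typically in its mmap hook. A hedged sketch under that assumption; mydev_mmap and mydev_vm_ops are hypothetical:

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* vmas populated via vm_insert_mixed() must carry VM_MIXEDMAP */
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_ops = &mydev_vm_ops;    /* provides the ->fault shown above */
        return 0;
}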
mm/memory.c (88 lines changed)

@@ -1176,8 +1176,10 @@ pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
  * old drivers should use this, and they needed to mark their
  * pages reserved for the old functions anyway.
  */
-static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+                       struct page *page, pgprot_t prot)
 {
+       struct mm_struct *mm = vma->vm_mm;
        int retval;
        pte_t *pte;
        spinlock_t *ptl;
@@ -1237,17 +1239,46 @@ out:
  *
  * The page does not need to be reserved.
  */
-int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+                       struct page *page)
 {
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
        if (!page_count(page))
                return -EINVAL;
        vma->vm_flags |= VM_INSERTPAGE;
-       return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+       return insert_page(vma, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn, pgprot_t prot)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       int retval;
+       pte_t *pte, entry;
+       spinlock_t *ptl;
+
+       retval = -ENOMEM;
+       pte = get_locked_pte(mm, addr, &ptl);
+       if (!pte)
+               goto out;
+       retval = -EBUSY;
+       if (!pte_none(*pte))
+               goto out_unlock;
+
+       /* Ok, finally just insert the thing.. */
+       entry = pte_mkspecial(pfn_pte(pfn, prot));
+       set_pte_at(mm, addr, pte, entry);
+       update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+
+       retval = 0;
+out_unlock:
+       pte_unmap_unlock(pte, ptl);
+out:
+       return retval;
+}
+
 /**
  * vm_insert_pfn - insert single pfn into user vma
  * @vma: user vma to map to
@@ -1263,11 +1294,6 @@ EXPORT_SYMBOL(vm_insert_page);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn)
 {
-       struct mm_struct *mm = vma->vm_mm;
-       int retval;
-       pte_t *pte, entry;
-       spinlock_t *ptl;
-
        /*
         * Technically, architectures with pte_special can avoid all these
         * restrictions (same for remap_pfn_range). However we would like
@@ -1280,28 +1306,36 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
        BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
 
-       retval = -ENOMEM;
-       pte = get_locked_pte(mm, addr, &ptl);
-       if (!pte)
-               goto out;
-       retval = -EBUSY;
-       if (!pte_none(*pte))
-               goto out_unlock;
-
-       /* Ok, finally just insert the thing.. */
-       entry = pte_mkspecial(pfn_pte(pfn, vma->vm_page_prot));
-       set_pte_at(mm, addr, pte, entry);
-       update_mmu_cache(vma, addr, entry);
-
-       retval = 0;
-out_unlock:
-       pte_unmap_unlock(pte, ptl);
-
-out:
-       return retval;
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+       return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn)
+{
+       BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+
+       /*
+        * If we don't have pte special, then we have to use the pfn_valid()
+        * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+        * refcount the page if pfn_valid is true (hence insert_page rather
+        * than insert_pfn).
+        */
+       if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
+               struct page *page;
+
+               page = pfn_to_page(pfn);
+               return insert_page(vma, addr, page, vma->vm_page_prot);
+       }
+       return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_mixed);
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
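For reference, the HAVE_PTE_SPECIAL constant tested in vm_insert_mixed() is defined by the parent commit (7e675137a8) from the architecture's pte_special support; reproduced from memory, approximately:

/* mm/memory.c, added by the parent commit */
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif

With pte_special available, insert_pfn() can mark every pte special and vm_normal_page() can tell raw mappings apart per pte; without it, pfn_valid() is the only discriminator for VM_MIXEDMAP, which is why vm_insert_mixed() must fall back to the refcounted insert_page() path for valid pfns.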