mm: move mm_populate()-related code to mm/gup.c
It's odd that we have populate_vma_page_range() and __mm_populate() in mm/mlock.c. They implement generic memory population; mlocking is just one possible side effect, applied when VM_LOCKED is set, and __get_user_pages() is the core of the implementation. Let's move the code into mm/gup.c.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
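For context on the call chain being moved: both mlock() and the MAP_POPULATE / MAP_LOCKED mmap paths funnel into __mm_populate(), and the convenience wrapper mm_populate() in include/linux/mm.h simply calls it with errors ignored. A sketch of that wrapper as it looked around this kernel version (approximate, and not part of this diff):

/* include/linux/mm.h — sketch, approximate for this era; not part of this commit */
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}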
parent c561259ca7
commit acc3c8d15e

2 changed files with 118 additions and 118 deletions

mm/gup.c (+118)
@@ -818,6 +818,124 @@ long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(get_user_pages);
 
+/**
+ * populate_vma_page_range() - populate a range of pages in the vma.
+ * @vma:   target vma
+ * @start: start address
+ * @end:   end address
+ * @nonblocking:
+ *
+ * This takes care of mlocking the pages too if VM_LOCKED is set.
+ *
+ * return 0 on success, negative error code on error.
+ *
+ * vma->vm_mm->mmap_sem must be held.
+ *
+ * If @nonblocking is NULL, it may be held for read or write and will
+ * be unperturbed.
+ *
+ * If @nonblocking is non-NULL, it must be held for read only and may be
+ * released.  If it's released, *@nonblocking will be set to 0.
+ */
+long populate_vma_page_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end, int *nonblocking)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long nr_pages = (end - start) / PAGE_SIZE;
+	int gup_flags;
+
+	VM_BUG_ON(start & ~PAGE_MASK);
+	VM_BUG_ON(end & ~PAGE_MASK);
+	VM_BUG_ON_VMA(start < vma->vm_start, vma);
+	VM_BUG_ON_VMA(end > vma->vm_end, vma);
+	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+
+	gup_flags = FOLL_TOUCH | FOLL_POPULATE;
+	/*
+	 * We want to touch writable mappings with a write fault in order
+	 * to break COW, except for shared mappings because these don't COW
+	 * and we would not want to dirty them for nothing.
+	 */
+	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
+		gup_flags |= FOLL_WRITE;
+
+	/*
+	 * We want mlock to succeed for regions that have any permissions
+	 * other than PROT_NONE.
+	 */
+	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+		gup_flags |= FOLL_FORCE;
+
+	/*
+	 * We made sure addr is within a VMA, so the following will
+	 * not result in a stack expansion that recurses back here.
+	 */
+	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
+				NULL, NULL, nonblocking);
+}
+
+/*
+ * __mm_populate - populate and/or mlock pages within a range of address space.
+ *
+ * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
+ * flags. VMAs must be already marked with the desired vm_flags, and
+ * mmap_sem must not be held.
+ */
+int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long end, nstart, nend;
+	struct vm_area_struct *vma = NULL;
+	int locked = 0;
+	long ret = 0;
+
+	VM_BUG_ON(start & ~PAGE_MASK);
+	VM_BUG_ON(len != PAGE_ALIGN(len));
+	end = start + len;
+
+	for (nstart = start; nstart < end; nstart = nend) {
+		/*
+		 * We want to fault in pages for [nstart; end) address range.
+		 * Find first corresponding VMA.
+		 */
+		if (!locked) {
+			locked = 1;
+			down_read(&mm->mmap_sem);
+			vma = find_vma(mm, nstart);
+		} else if (nstart >= vma->vm_end)
+			vma = vma->vm_next;
+		if (!vma || vma->vm_start >= end)
+			break;
+		/*
+		 * Set [nstart; nend) to intersection of desired address
+		 * range with the first VMA. Also, skip undesirable VMA types.
+		 */
+		nend = min(end, vma->vm_end);
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+			continue;
+		if (nstart < vma->vm_start)
+			nstart = vma->vm_start;
+		/*
+		 * Now fault in a range of pages. populate_vma_page_range()
+		 * double checks the vma flags, so that it won't mlock pages
+		 * if the vma was already munlocked.
+		 */
+		ret = populate_vma_page_range(vma, nstart, nend, &locked);
+		if (ret < 0) {
+			if (ignore_errors) {
+				ret = 0;
+				continue;	/* continue at next VMA */
+			}
+			break;
+		}
+		nend = nstart + ret * PAGE_SIZE;
+		ret = 0;
+	}
+	if (locked)
+		up_read(&mm->mmap_sem);
+	return ret;	/* 0 or negative error code */
+}
+
 /**
  * get_dump_page() - pin user page in memory while writing it to core dump
  * @addr: user address
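The gup_flags condition in the moved code is easy to misread, so here is a small userspace sketch (plain C; the VM_* values mirror the kernel's vm_flags bits, but this is illustrative code, not kernel code) that enumerates when populate_vma_page_range() requests FOLL_WRITE: only private, non-shared writable mappings get a write fault, so COW is broken eagerly without dirtying shared pages for nothing.

/* foll_write_demo.c — illustrative sketch only */
#include <stdio.h>

#define VM_WRITE	0x00000002UL	/* mirrors the kernel's vm_flags bit */
#define VM_SHARED	0x00000008UL	/* mirrors the kernel's vm_flags bit */

int main(void)
{
	unsigned long cases[] = { 0, VM_WRITE, VM_SHARED, VM_WRITE | VM_SHARED };

	for (int i = 0; i < 4; i++) {
		unsigned long flags = cases[i];
		/* Same test populate_vma_page_range() uses above. */
		int foll_write = (flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;

		printf("VM_WRITE=%d VM_SHARED=%d -> FOLL_WRITE=%d\n",
		       !!(flags & VM_WRITE), !!(flags & VM_SHARED), foll_write);
	}
	return 0;
}

Only the VM_WRITE-without-VM_SHARED row prints FOLL_WRITE=1, matching the comment about breaking COW.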
mm/mlock.c (-118)
@@ -205,62 +205,6 @@ out:
 	return nr_pages - 1;
 }
 
-/**
- * populate_vma_page_range() - populate a range of pages in the vma.
- * @vma:   target vma
- * @start: start address
- * @end:   end address
- * @nonblocking:
- *
- * This takes care of mlocking the pages too if VM_LOCKED is set.
- *
- * return 0 on success, negative error code on error.
- *
- * vma->vm_mm->mmap_sem must be held.
- *
- * If @nonblocking is NULL, it may be held for read or write and will
- * be unperturbed.
- *
- * If @nonblocking is non-NULL, it must be held for read only and may be
- * released.  If it's released, *@nonblocking will be set to 0.
- */
-long populate_vma_page_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end, int *nonblocking)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long nr_pages = (end - start) / PAGE_SIZE;
-	int gup_flags;
-
-	VM_BUG_ON(start & ~PAGE_MASK);
-	VM_BUG_ON(end & ~PAGE_MASK);
-	VM_BUG_ON_VMA(start < vma->vm_start, vma);
-	VM_BUG_ON_VMA(end > vma->vm_end, vma);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
-
-	gup_flags = FOLL_TOUCH | FOLL_POPULATE;
-	/*
-	 * We want to touch writable mappings with a write fault in order
-	 * to break COW, except for shared mappings because these don't COW
-	 * and we would not want to dirty them for nothing.
-	 */
-	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
-		gup_flags |= FOLL_WRITE;
-
-	/*
-	 * We want mlock to succeed for regions that have any permissions
-	 * other than PROT_NONE.
-	 */
-	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
-		gup_flags |= FOLL_FORCE;
-
-	/*
-	 * We made sure addr is within a VMA, so the following will
-	 * not result in a stack expansion that recurses back here.
-	 */
-	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
-				NULL, NULL, nonblocking);
-}
-
 /*
  * convert get_user_pages() return value to posix mlock() error
  */

@@ -660,68 +604,6 @@ static int do_mlock(unsigned long start, size_t len, int on)
 	return error;
 }
 
-/*
- * __mm_populate - populate and/or mlock pages within a range of address space.
- *
- * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
- * flags. VMAs must be already marked with the desired vm_flags, and
- * mmap_sem must not be held.
- */
-int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long end, nstart, nend;
-	struct vm_area_struct *vma = NULL;
-	int locked = 0;
-	long ret = 0;
-
-	VM_BUG_ON(start & ~PAGE_MASK);
-	VM_BUG_ON(len != PAGE_ALIGN(len));
-	end = start + len;
-
-	for (nstart = start; nstart < end; nstart = nend) {
-		/*
-		 * We want to fault in pages for [nstart; end) address range.
-		 * Find first corresponding VMA.
-		 */
-		if (!locked) {
-			locked = 1;
-			down_read(&mm->mmap_sem);
-			vma = find_vma(mm, nstart);
-		} else if (nstart >= vma->vm_end)
-			vma = vma->vm_next;
-		if (!vma || vma->vm_start >= end)
-			break;
-		/*
-		 * Set [nstart; nend) to intersection of desired address
-		 * range with the first VMA. Also, skip undesirable VMA types.
-		 */
-		nend = min(end, vma->vm_end);
-		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
-			continue;
-		if (nstart < vma->vm_start)
-			nstart = vma->vm_start;
-		/*
-		 * Now fault in a range of pages. populate_vma_page_range()
-		 * double checks the vma flags, so that it won't mlock pages
-		 * if the vma was already munlocked.
-		 */
-		ret = populate_vma_page_range(vma, nstart, nend, &locked);
-		if (ret < 0) {
-			if (ignore_errors) {
-				ret = 0;
-				continue;	/* continue at next VMA */
-			}
-			break;
-		}
-		nend = nstart + ret * PAGE_SIZE;
-		ret = 0;
-	}
-	if (locked)
-		up_read(&mm->mmap_sem);
-	return ret;	/* 0 or negative error code */
-}
-
 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 {
 	unsigned long locked;
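The effect of __mm_populate() can be observed from userspace by comparing a lazily faulted anonymous mapping with one created using MAP_POPULATE (mlock() takes the same path), checking residency with mincore(). A minimal demo, assuming Linux and glibc:

/* populate_demo.c — minimal sketch; build with: cc -O2 populate_demo.c */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

/* Count resident pages in [addr, addr + len) via mincore(). */
static size_t resident_pages(void *addr, size_t len, size_t pagesz)
{
	size_t npages = len / pagesz, n = 0;
	unsigned char *vec = malloc(npages);

	if (!vec || mincore(addr, len, vec) != 0) {
		perror("mincore");
		exit(1);
	}
	for (size_t i = 0; i < npages; i++)
		n += vec[i] & 1;
	free(vec);
	return n;
}

int main(void)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = 64 * pagesz;

	/* Plain anonymous mapping: pages fault in lazily on first touch. */
	void *lazy = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* MAP_POPULATE prefaults the whole range at mmap() time; after
	 * this commit that work is done by __mm_populate() in mm/gup.c. */
	void *eager = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);

	if (lazy == MAP_FAILED || eager == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("lazy:  %zu of %zu pages resident\n",
	       resident_pages(lazy, len, pagesz), len / pagesz);
	printf("eager: %zu of %zu pages resident\n",
	       resident_pages(eager, len, pagesz), len / pagesz);
	return 0;
}

Expected output is roughly 0 of 64 pages resident for the lazy mapping and 64 of 64 for the populated one, though exact counts are not guaranteed under memory pressure.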