[PATCH] mm: unmap_vmas with inner ptlock
Remove the page_table_lock from around the calls to unmap_vmas, and replace the pte_offset_map in zap_pte_range by pte_offset_map_lock: all callers are now safe to descend without page_table_lock.

Don't attempt fancy locking for hugepages, just take page_table_lock in unmap_hugepage_range. Which makes zap_hugepage_range, and the hugetlb test in zap_page_range, redundant: unmap_vmas calls unmap_hugepage_range anyway. Nor does unmap_vmas have much use for its mm arg now.

The tlb_start_vma and tlb_end_vma in unmap_page_range are now called without page_table_lock: if they're implemented at all, they typically come down to flush_cache_range (usually done outside page_table_lock) and flush_tlb_range (which we already audited for the mprotect case).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
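The zap_pte_range side of the change follows the pte_offset_map_lock() pattern named in the message. Below is a minimal sketch of that pattern, not the verbatim mm/memory.c code: the function name, argument list and loop body are abbreviated for illustration.

/*
 * Sketch of the locking pattern the commit message describes: the pte
 * walker takes the page table lock itself via pte_offset_map_lock()
 * and drops it with pte_unmap_unlock(), instead of relying on the
 * caller holding mm->page_table_lock around the whole walk.
 */
static void zap_pte_range_sketch(struct mmu_gather *tlb, pmd_t *pmd,
				 unsigned long addr, unsigned long end)
{
	struct mm_struct *mm = tlb->mm;
	spinlock_t *ptl;
	pte_t *pte;

	/*
	 * Previously: pte = pte_offset_map(pmd, addr), with the caller
	 * holding mm->page_table_lock across the call.
	 */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		pte_t ptent = *pte;

		if (pte_none(ptent))
			continue;
		/* ... clear the pte, free the page, adjust rss ... */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}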
parent 8f4f8c164c
commit 508034a32b

6 changed files with 21 additions and 54 deletions
mm/hugetlb.c | 12

@@ -314,6 +314,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	BUG_ON(start & ~HPAGE_MASK);
 	BUG_ON(end & ~HPAGE_MASK);
 
+	spin_lock(&mm->page_table_lock);
+
 	/* Update high watermark before we lower rss */
 	update_hiwater_rss(mm);
 
@@ -333,17 +335,9 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		put_page(page);
 		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
 	}
-	flush_tlb_range(vma, start, end);
-}
 
-void zap_hugepage_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long length)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	spin_lock(&mm->page_table_lock);
-	unmap_hugepage_range(vma, start, start + length);
 	spin_unlock(&mm->page_table_lock);
+	flush_tlb_range(vma, start, end);
 }
 
 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
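Read together, the two hunks move all of the locking inside unmap_hugepage_range() itself. As a rough illustration of the resulting shape (the loop body and some local declarations are elided or assumed from the surrounding 2.6.14-era source, so this is a sketch, not the verbatim file):

/*
 * Illustration of unmap_hugepage_range() after the patch: the function
 * now takes and drops mm->page_table_lock itself, so callers such as
 * unmap_vmas() need not hold it, and zap_hugepage_range() is gone.
 */
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;

	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (address = start; address < end; address += HPAGE_SIZE) {
		/* ... clear the huge pte, put_page(), drop file_rss ... */
	}

	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
}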