mm: use new helper functions around the i_mmap_mutex
Convert all open coded mutex_lock/unlock calls to the i_mmap_[lock/unlock]_write() helpers.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: "Kirill A. Shutemov" <kirill@shutemov.name>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
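For context, the helpers this commit switches callers to are thin wrappers introduced by the parent commit (8b28f621be); at this point in the series they still take the same i_mmap_mutex, so the conversion in the hunks below is purely mechanical. A minimal sketch of what those wrappers look like (reconstructed for illustration, not copied verbatim from include/linux/fs.h):

/*
 * Sketch of the i_mmap write-lock helpers, reconstructed for illustration.
 * The authoritative definitions live in include/linux/fs.h as added by the
 * parent commit; at this stage they simply wrap the existing i_mmap_mutex.
 */
static inline void i_mmap_lock_write(struct address_space *mapping)
{
	mutex_lock(&mapping->i_mmap_mutex);
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
	mutex_unlock(&mapping->i_mmap_mutex);
}

With these in place, each hunk below is a one-for-one substitution: the open-coded &mapping->i_mmap_mutex locking is replaced by a call that takes the address_space itself.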
commit 83cde9e8ba
parent 8b28f621be
12 changed files with 40 additions and 40 deletions

 mm/hugetlb.c | 12
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2774,7 +2774,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * this mapping should be shared between all the VMAs,
 	 * __unmap_hugepage_range() is called as the lock is already held
 	 */
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
 		/* Do not unmap the current VMA */
 		if (iter_vma == vma)
@@ -2791,7 +2791,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 			unmap_hugepage_range(iter_vma, address,
 					     address + huge_page_size(h), page);
 	}
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 }
 
 /*
@@ -3348,7 +3348,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	flush_cache_range(vma, address, end);
 
 	mmu_notifier_invalidate_range_start(mm, start, end);
-	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+	i_mmap_lock_write(vma->vm_file->f_mapping);
 	for (; address < end; address += huge_page_size(h)) {
 		spinlock_t *ptl;
 		ptep = huge_pte_offset(mm, address);
@@ -3376,7 +3376,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * and that page table be reused and filled with junk.
 	 */
 	flush_tlb_range(vma, start, end);
-	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 
 	return pages << h->order;
@@ -3544,7 +3544,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	if (!vma_shareable(vma, addr))
 		return (pte_t *)pmd_alloc(mm, pud, addr);
 
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
 		if (svma == vma)
 			continue;
@@ -3572,7 +3572,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	spin_unlock(ptl);
 out:
 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 	return pte;
 }
 