mm/mmu_notifier: use structure for invalidate_range_start/end calls v2
To avoid having to change many call sites every time we want to add a
parameter, use a structure to group all parameters for the mmu_notifier
invalidate_range_start/end calls. No functional changes with this patch.

[akpm@linux-foundation.org: coding style fixes]
Link: http://lkml.kernel.org/r/20181205053628.3210-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>

From: Jérôme Glisse <jglisse@redhat.com>
Subject: mm/mmu_notifier: use structure for invalidate_range_start/end calls v3

Fix build warning in migrate.c when CONFIG_MMU_NOTIFIER=n.

Link: http://lkml.kernel.org/r/20181213171330.8489-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
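Before the hunks below, here is a minimal sketch of the call-site conversion the patch performs. The enclosing function is hypothetical and exists only for illustration; the mmu_notifier_range_init() / mmu_notifier_invalidate_range_start() / mmu_notifier_invalidate_range_end() calls follow the pattern visible in the mm/hugetlb.c hunks.

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/*
 * Hypothetical caller, for illustration only: the (mm, start, end)
 * triplet previously passed to each notifier call is grouped into a
 * single struct mmu_notifier_range that is initialized once and then
 * handed to both the start and end calls.
 */
static void example_invalidate(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;

	/* Old API: mmu_notifier_invalidate_range_start(mm, start, end); */
	mmu_notifier_range_init(&range, mm, start, end);
	mmu_notifier_invalidate_range_start(&range);

	/* ... unmap or rewrite the page tables covering [start, end) ... */

	/* Old API: mmu_notifier_invalidate_range_end(mm, start, end); */
	mmu_notifier_invalidate_range_end(&range);
}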
commit ac46d4f3c4
parent 5d6527a784
17 changed files with 262 additions and 250 deletions
mm/hugetlb.c | 52 +++++++++++++++++++++++++---------------------------

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3240,16 +3240,16 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	int cow;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
-	unsigned long mmun_start;	/* For mmu_notifiers */
-	unsigned long mmun_end;		/* For mmu_notifiers */
+	struct mmu_notifier_range range;
 	int ret = 0;
 
 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
-	mmun_start = vma->vm_start;
-	mmun_end = vma->vm_end;
-	if (cow)
-		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
+	if (cow) {
+		mmu_notifier_range_init(&range, src, vma->vm_start,
+					vma->vm_end);
+		mmu_notifier_invalidate_range_start(&range);
+	}
 
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
 		spinlock_t *src_ptl, *dst_ptl;
@@ -3325,7 +3325,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	}
 
 	if (cow)
-		mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
+		mmu_notifier_invalidate_range_end(&range);
 
 	return ret;
 }
@@ -3342,8 +3342,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	struct page *page;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
-	unsigned long mmun_start = start;	/* For mmu_notifiers */
-	unsigned long mmun_end   = end;		/* For mmu_notifiers */
+	struct mmu_notifier_range range;
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
 	BUG_ON(start & ~huge_page_mask(h));
@@ -3359,8 +3358,9 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	/*
 	 * If sharing possible, alert mmu notifiers of worst case.
 	 */
-	adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
-	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+	mmu_notifier_range_init(&range, mm, start, end);
+	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
+	mmu_notifier_invalidate_range_start(&range);
 	address = start;
 	for (; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address, sz);
@@ -3428,7 +3428,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (ref_page)
 			break;
 	}
-	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_end(&range);
 	tlb_end_vma(tlb, vma);
 }
 
@@ -3546,9 +3546,8 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *old_page, *new_page;
 	int outside_reserve = 0;
 	vm_fault_t ret = 0;
-	unsigned long mmun_start;	/* For mmu_notifiers */
-	unsigned long mmun_end;		/* For mmu_notifiers */
 	unsigned long haddr = address & huge_page_mask(h);
+	struct mmu_notifier_range range;
 
 	pte = huge_ptep_get(ptep);
 	old_page = pte_page(pte);
@@ -3627,9 +3626,8 @@ retry_avoidcopy:
 	__SetPageUptodate(new_page);
 	set_page_huge_active(new_page);
 
-	mmun_start = haddr;
-	mmun_end = mmun_start + huge_page_size(h);
-	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+	mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
+	mmu_notifier_invalidate_range_start(&range);
 
 	/*
 	 * Retake the page table lock to check for racing updates
@@ -3642,7 +3640,7 @@ retry_avoidcopy:
 
 		/* Break COW */
 		huge_ptep_clear_flush(vma, haddr, ptep);
-		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
+		mmu_notifier_invalidate_range(mm, range.start, range.end);
 		set_huge_pte_at(mm, haddr, ptep,
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page, true);
@@ -3651,7 +3649,7 @@ retry_avoidcopy:
 		new_page = old_page;
 	}
 	spin_unlock(ptl);
-	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_end(&range);
 out_release_all:
 	restore_reserve_on_error(h, vma, haddr, new_page);
 	put_page(new_page);
@@ -4340,21 +4338,21 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	pte_t pte;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long pages = 0;
-	unsigned long f_start = start;
-	unsigned long f_end = end;
 	bool shared_pmd = false;
+	struct mmu_notifier_range range;
 
 	/*
 	 * In the case of shared PMDs, the area to flush could be beyond
-	 * start/end.  Set f_start/f_end to cover the maximum possible
+	 * start/end.  Set range.start/range.end to cover the maximum possible
 	 * range if PMD sharing is possible.
 	 */
-	adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
+	mmu_notifier_range_init(&range, mm, start, end);
+	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
 
 	BUG_ON(address >= end);
-	flush_cache_range(vma, f_start, f_end);
+	flush_cache_range(vma, range.start, range.end);
 
-	mmu_notifier_invalidate_range_start(mm, f_start, f_end);
+	mmu_notifier_invalidate_range_start(&range);
 	i_mmap_lock_write(vma->vm_file->f_mapping);
 	for (; address < end; address += huge_page_size(h)) {
 		spinlock_t *ptl;
@@ -4405,7 +4403,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * did unshare a page of pmds, flush the range corresponding to the pud.
 	 */
 	if (shared_pmd)
-		flush_hugetlb_tlb_range(vma, f_start, f_end);
+		flush_hugetlb_tlb_range(vma, range.start, range.end);
 	else
 		flush_hugetlb_tlb_range(vma, start, end);
 	/*
@@ -4415,7 +4413,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * See Documentation/vm/mmu_notifier.rst
 	 */
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
-	mmu_notifier_invalidate_range_end(mm, f_start, f_end);
+	mmu_notifier_invalidate_range_end(&range);
 
 	return pages << h->order;
 }
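For reference while reading the hunks above, a rough sketch of the shape of struct mmu_notifier_range implied by these call sites (range.start/range.end, plus the mm passed to mmu_notifier_range_init()). The real definition lives in include/linux/mmu_notifier.h and carries additional state, so treat this as an illustration only.

/*
 * Illustrative only: fields inferred from the call sites in this diff,
 * i.e. mmu_notifier_range_init(&range, mm, start, end) and the use of
 * range.start/range.end. The actual struct mmu_notifier_range in
 * include/linux/mmu_notifier.h has more members than shown here.
 */
struct mmu_notifier_range_sketch {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
};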