mm/mmu_notifier: use structure for invalidate_range_start/end calls v2

To avoid having to change many call sites every time we want to add a
parameter, use a structure to group all parameters for the mmu_notifier
invalidate_range_start/end calls.  No functional changes with this patch.
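
Concretely, each call site goes from passing mm/start/end individually to
filling one on-stack structure (a minimal sketch distilled from the
mm/migrate.c hunks below; the authoritative definitions live in
include/linux/mmu_notifier.h):

	struct mmu_notifier_range range;

	/* Before: every parameter is passed to both calls. */
	mmu_notifier_invalidate_range_start(mm, start, end);
	/* ... mutate page tables ... */
	mmu_notifier_invalidate_range_end(mm, start, end);

	/* After: the parameters are grouped once in the range structure. */
	mmu_notifier_range_init(&range, mm, start, end);
	mmu_notifier_invalidate_range_start(&range);
	/* ... mutate page tables ... */
	mmu_notifier_invalidate_range_end(&range);

A future parameter then only touches mmu_notifier_range_init() and the
structure definition instead of every caller.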

[akpm@linux-foundation.org: coding style fixes]
Link: http://lkml.kernel.org/r/20181205053628.3210-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
From: Jérôme Glisse <jglisse@redhat.com>
Subject: mm/mmu_notifier: use structure for invalidate_range_start/end calls v3

Fix a build warning in migrate.c when CONFIG_MMU_NOTIFIER=n.

Link: http://lkml.kernel.org/r/20181213171330.8489-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
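
The build warning addressed by the v3 respin comes from the
CONFIG_MMU_NOTIFIER=n configuration, where the stubbed helpers must still
consume the on-stack range variable so the compiler does not flag it as
set-but-unused in migrate.c.  A plausible shape for such a stub, assuming
the config-off structure only needs to carry start/end (the exact
definition is in include/linux/mmu_notifier.h):

	struct mmu_notifier_range {
		unsigned long start;
		unsigned long end;
	};

	static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
						    unsigned long start,
						    unsigned long end)
	{
		range->start = start;
		range->end = end;
	}

	#define mmu_notifier_range_init(range, mm, start, end) \
		_mmu_notifier_range_init(range, start, end)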

@@ -2299,6 +2299,7 @@ next:
  */
 static void migrate_vma_collect(struct migrate_vma *migrate)
 {
+	struct mmu_notifier_range range;
 	struct mm_walk mm_walk;
 
 	mm_walk.pmd_entry = migrate_vma_collect_pmd;
@@ -2310,13 +2311,11 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
 	mm_walk.mm = migrate->vma->vm_mm;
 	mm_walk.private = migrate;
 
-	mmu_notifier_invalidate_range_start(mm_walk.mm,
-					    migrate->start,
-					    migrate->end);
+	mmu_notifier_range_init(&range, mm_walk.mm, migrate->start,
+				migrate->end);
+	mmu_notifier_invalidate_range_start(&range);
 	walk_page_range(migrate->start, migrate->end, &mm_walk);
-	mmu_notifier_invalidate_range_end(mm_walk.mm,
-					  migrate->start,
-					  migrate->end);
+	mmu_notifier_invalidate_range_end(&range);
 
 	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
 }
@@ -2697,9 +2696,8 @@ static void migrate_vma_pages(struct migrate_vma *migrate)
 {
 	const unsigned long npages = migrate->npages;
 	const unsigned long start = migrate->start;
-	struct vm_area_struct *vma = migrate->vma;
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long addr, i, mmu_start;
+	struct mmu_notifier_range range;
+	unsigned long addr, i;
 	bool notified = false;
 
 	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
@@ -2718,11 +2716,12 @@ static void migrate_vma_pages(struct migrate_vma *migrate)
 			continue;
 		}
 		if (!notified) {
-			mmu_start = addr;
 			notified = true;
-			mmu_notifier_invalidate_range_start(mm,
-							    mmu_start,
-							    migrate->end);
+
+			mmu_notifier_range_init(&range,
+						migrate->vma->vm_mm,
+						addr, migrate->end);
+			mmu_notifier_invalidate_range_start(&range);
 		}
 		migrate_vma_insert_page(migrate, addr, newpage,
 					&migrate->src[i],
@@ -2763,8 +2762,7 @@ static void migrate_vma_pages(struct migrate_vma *migrate)
 	 * did already call it.
 	 */
 	if (notified)
-		mmu_notifier_invalidate_range_only_end(mm, mmu_start,
-						       migrate->end);
+		mmu_notifier_invalidate_range_only_end(&range);
 }
 
 /*
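
On the listener side, the parent commit (5d6527a784) converted the
notifier callbacks themselves to take the same structure, so a driver
implementation now looks roughly like this (a sketch only;
my_invalidate_range_start is a hypothetical callback name, not code from
this patch):

	static int my_invalidate_range_start(struct mmu_notifier *mn,
					     const struct mmu_notifier_range *range)
	{
		/*
		 * Hypothetical handler: range->mm, range->start and
		 * range->end replace the old individual parameters, so
		 * adding a field later does not change this signature.
		 */
		return 0;
	}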