mm/hmm: cleanup special vma handling (VM_SPECIAL)
Special vmas (those with any of the VM_SPECIAL flags) cannot be accessed by devices because there is no consistent model across device drivers for such vmas and their backing memory.

This patch passes the hmm_range struct directly as the hmm_pfns_special() argument, since the condition always affects the whole vma and thus the whole range. It also makes the behavior consistent: after this patch, both hmm_vma_fault() and hmm_vma_get_pfns() return -EINVAL when facing such a vma. Previously hmm_vma_fault() returned 0 while hmm_vma_get_pfns() returned -EINVAL, but both filled the HMM pfn array with the special entry.

Link: http://lkml.kernel.org/r/20180323005527.758-10-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
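For illustration, a minimal, hypothetical driver-side sketch of the caller-visible contract after this change; the helper name my_driver_snapshot() and its error handling are assumptions for this example, not part of the patch. It shows that a special vma now fails uniformly with -EINVAL from both entry points, with range->pfns filled with HMM_PFN_SPECIAL:

	#include <linux/hmm.h>

	/*
	 * Hypothetical driver helper, for illustration only: snapshot the
	 * CPU page table for a range. After this patch, a special vma
	 * (hugetlbfs or any VM_SPECIAL flag) makes hmm_vma_get_pfns() and
	 * hmm_vma_fault() both return -EINVAL, with every entry of
	 * range->pfns set to HMM_PFN_SPECIAL, so one error path suffices.
	 */
	static int my_driver_snapshot(struct hmm_range *range)
	{
		int ret;

		ret = hmm_vma_get_pfns(range);
		if (ret == -EINVAL)
			/* Special vma: range->pfns[] all hold HMM_PFN_SPECIAL. */
			return ret;
		if (ret)
			return ret;

		/* ... mirror range->pfns into device page tables here ... */

		return 0;
	}

Before this patch, a driver had to treat hmm_vma_fault() returning 0 on a special vma as apparent success and inspect the pfn array; with both functions returning -EINVAL, that special case disappears.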
commit 855ce7d252
parent ff05c0c6bb

1 changed file with 20 additions and 20 deletions
 mm/hmm.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -324,14 +324,6 @@ static int hmm_vma_do_fault(struct mm_walk *walk,
 	return -EAGAIN;
 }
 
-static void hmm_pfns_special(uint64_t *pfns,
-			     unsigned long addr,
-			     unsigned long end)
-{
-	for (; addr < end; addr += PAGE_SIZE, pfns++)
-		*pfns = HMM_PFN_SPECIAL;
-}
-
 static int hmm_pfns_bad(unsigned long addr,
 			unsigned long end,
 			struct mm_walk *walk)
@@ -529,6 +521,14 @@ fault:
 	return 0;
 }
 
+static void hmm_pfns_special(struct hmm_range *range)
+{
+	unsigned long addr = range->start, i = 0;
+
+	for (; addr < range->end; addr += PAGE_SIZE, i++)
+		range->pfns[i] = HMM_PFN_SPECIAL;
+}
+
 /*
  * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
  * @range: range being snapshotted
@@ -553,12 +553,6 @@ int hmm_vma_get_pfns(struct hmm_range *range)
 	struct mm_walk mm_walk;
 	struct hmm *hmm;
 
-	/* FIXME support hugetlb fs */
-	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
-		hmm_pfns_special(range->pfns, range->start, range->end);
-		return -EINVAL;
-	}
-
 	/* Sanity check, this really should not happen ! */
 	if (range->start < vma->vm_start || range->start >= vma->vm_end)
 		return -EINVAL;
@@ -572,6 +566,12 @@ int hmm_vma_get_pfns(struct hmm_range *range)
 	if (!hmm->mmu_notifier.ops)
 		return -EINVAL;
 
+	/* FIXME support hugetlb fs */
+	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
+		hmm_pfns_special(range);
+		return -EINVAL;
+	}
+
 	if (!(vma->vm_flags & VM_READ)) {
 		/*
 		 * If vma do not allow read access, then assume that it does
@@ -740,6 +740,12 @@ int hmm_vma_fault(struct hmm_range *range, bool write, bool block)
 	if (!hmm->mmu_notifier.ops)
 		return -EINVAL;
 
+	/* FIXME support hugetlb fs */
+	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
+		hmm_pfns_special(range);
+		return -EINVAL;
+	}
+
 	if (!(vma->vm_flags & VM_READ)) {
 		/*
 		 * If vma do not allow read access, then assume that it does
@@ -751,12 +757,6 @@ int hmm_vma_fault(struct hmm_range *range, bool write, bool block)
 		return -EPERM;
 	}
 
-	/* FIXME support hugetlb fs */
-	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
-		hmm_pfns_special(range->pfns, range->start, range->end);
-		return 0;
-	}
-
 	/* Initialize range to track CPU page table update */
 	spin_lock(&hmm->lock);
 	range->valid = true;