Mirror of https://github.com/Fishwaldo/linux-bl808.git
(synced 2025-03-16 12:04:08 +00:00)
mm/hugetlb: unify clearing of RestoreReserve for private pages
A trivial cleanup to move clearing of RestoreReserve into adding anon rmap
of private hugetlb mappings. It matches with the shared mappings where we
only clear the bit when adding into page cache, rather than spreading it
around the code paths.

Link: https://lkml.kernel.org/r/20221020193832.776173-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
cc03817c0e
commit
4781593d5d
2 changed files with 5 additions and 11 deletions:
  mm/hugetlb.c — 14 changes (4 additions, 10 deletions)
  mm/rmap.c    —  2 changes (1 addition, 1 deletion)
@@ -4775,7 +4775,6 @@ hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr
 	hugepage_add_new_anon_rmap(new_page, vma, addr);
 	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
-	ClearHPageRestoreReserve(new_page);
 	SetHPageMigratable(new_page);
 }
@@ -5438,8 +5437,6 @@ retry_avoidcopy:
 	spin_lock(ptl);
 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
-		ClearHPageRestoreReserve(new_page);
-
 		/* Break COW or unshare */
 		huge_ptep_clear_flush(vma, haddr, ptep);
 		mmu_notifier_invalidate_range(mm, range.start, range.end);
@@ -5734,10 +5731,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	if (!pte_same(huge_ptep_get(ptep), old_pte))
 		goto backout;
 
-	if (anon_rmap) {
-		ClearHPageRestoreReserve(page);
+	if (anon_rmap)
 		hugepage_add_new_anon_rmap(page, vma, haddr);
-	} else
+	else
 		page_dup_file_rmap(page, true);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
@@ -6120,12 +6116,10 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
 		goto out_release_unlock;
 
-	if (page_in_pagecache) {
+	if (page_in_pagecache)
 		page_dup_file_rmap(page, true);
-	} else {
-		ClearHPageRestoreReserve(page);
+	else
 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
-	}
 
 	/*
 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
@@ -2571,7 +2571,7 @@ void hugepage_add_new_anon_rmap(struct page *page,
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(compound_mapcount_ptr(page), 0);
 	atomic_set(compound_pincount_ptr(page), 0);
-
+	ClearHPageRestoreReserve(page);
 	__page_set_anon_rmap(page, vma, address, 1);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
Loading…
Add table
Reference in a new issue