mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-04-23 22:53:56 +00:00
mm/fork: Pass new vma pointer into copy_page_range()
This prepares for future work to trigger early copy-on-write (COW) on pinned pages during fork(). No functional change intended. Signed-off-by: Peter Xu <peterx@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
008cfe4418
commit
7a4830c380
3 changed files with 11 additions and 7 deletions
|
@ -1646,7 +1646,7 @@ struct mmu_notifier_range;
|
||||||
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
|
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
|
||||||
unsigned long end, unsigned long floor, unsigned long ceiling);
|
unsigned long end, unsigned long floor, unsigned long ceiling);
|
||||||
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
|
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
|
||||||
struct vm_area_struct *vma);
|
struct vm_area_struct *vma, struct vm_area_struct *new);
|
||||||
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
|
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
|
||||||
struct mmu_notifier_range *range,
|
struct mmu_notifier_range *range,
|
||||||
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
|
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
|
||||||
|
|
|
@ -589,7 +589,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
|
||||||
|
|
||||||
mm->map_count++;
|
mm->map_count++;
|
||||||
if (!(tmp->vm_flags & VM_WIPEONFORK))
|
if (!(tmp->vm_flags & VM_WIPEONFORK))
|
||||||
retval = copy_page_range(mm, oldmm, mpnt);
|
retval = copy_page_range(mm, oldmm, mpnt, tmp);
|
||||||
|
|
||||||
if (tmp->vm_ops && tmp->vm_ops->open)
|
if (tmp->vm_ops && tmp->vm_ops->open)
|
||||||
tmp->vm_ops->open(tmp);
|
tmp->vm_ops->open(tmp);
|
||||||
|
|
14
mm/memory.c
14
mm/memory.c
|
@ -819,6 +819,7 @@ copy_present_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||||
|
|
||||||
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||||
pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
|
pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
|
||||||
|
struct vm_area_struct *new,
|
||||||
unsigned long addr, unsigned long end)
|
unsigned long addr, unsigned long end)
|
||||||
{
|
{
|
||||||
pte_t *orig_src_pte, *orig_dst_pte;
|
pte_t *orig_src_pte, *orig_dst_pte;
|
||||||
|
@ -889,6 +890,7 @@ again:
|
||||||
|
|
||||||
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||||
pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
|
pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
|
||||||
|
struct vm_area_struct *new,
|
||||||
unsigned long addr, unsigned long end)
|
unsigned long addr, unsigned long end)
|
||||||
{
|
{
|
||||||
pmd_t *src_pmd, *dst_pmd;
|
pmd_t *src_pmd, *dst_pmd;
|
||||||
|
@ -915,7 +917,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
|
||||||
if (pmd_none_or_clear_bad(src_pmd))
|
if (pmd_none_or_clear_bad(src_pmd))
|
||||||
continue;
|
continue;
|
||||||
if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
|
if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
|
||||||
vma, addr, next))
|
vma, new, addr, next))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
} while (dst_pmd++, src_pmd++, addr = next, addr != end);
|
} while (dst_pmd++, src_pmd++, addr = next, addr != end);
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -923,6 +925,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
|
||||||
|
|
||||||
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||||
p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
|
p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
|
||||||
|
struct vm_area_struct *new,
|
||||||
unsigned long addr, unsigned long end)
|
unsigned long addr, unsigned long end)
|
||||||
{
|
{
|
||||||
pud_t *src_pud, *dst_pud;
|
pud_t *src_pud, *dst_pud;
|
||||||
|
@ -949,7 +952,7 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
|
||||||
if (pud_none_or_clear_bad(src_pud))
|
if (pud_none_or_clear_bad(src_pud))
|
||||||
continue;
|
continue;
|
||||||
if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
|
if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
|
||||||
vma, addr, next))
|
vma, new, addr, next))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
} while (dst_pud++, src_pud++, addr = next, addr != end);
|
} while (dst_pud++, src_pud++, addr = next, addr != end);
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -957,6 +960,7 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
|
||||||
|
|
||||||
static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||||
pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
|
pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
|
||||||
|
struct vm_area_struct *new,
|
||||||
unsigned long addr, unsigned long end)
|
unsigned long addr, unsigned long end)
|
||||||
{
|
{
|
||||||
p4d_t *src_p4d, *dst_p4d;
|
p4d_t *src_p4d, *dst_p4d;
|
||||||
|
@ -971,14 +975,14 @@ static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src
|
||||||
if (p4d_none_or_clear_bad(src_p4d))
|
if (p4d_none_or_clear_bad(src_p4d))
|
||||||
continue;
|
continue;
|
||||||
if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
|
if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
|
||||||
vma, addr, next))
|
vma, new, addr, next))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
} while (dst_p4d++, src_p4d++, addr = next, addr != end);
|
} while (dst_p4d++, src_p4d++, addr = next, addr != end);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||||
struct vm_area_struct *vma)
|
struct vm_area_struct *vma, struct vm_area_struct *new)
|
||||||
{
|
{
|
||||||
pgd_t *src_pgd, *dst_pgd;
|
pgd_t *src_pgd, *dst_pgd;
|
||||||
unsigned long next;
|
unsigned long next;
|
||||||
|
@ -1033,7 +1037,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
|
||||||
if (pgd_none_or_clear_bad(src_pgd))
|
if (pgd_none_or_clear_bad(src_pgd))
|
||||||
continue;
|
continue;
|
||||||
if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
|
if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
|
||||||
vma, addr, next))) {
|
vma, new, addr, next))) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue