Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-03-16 12:14:06 +00:00)
mm/memremap: fix memunmap_pages() race with get_dev_pagemap()
[ Upstream commit 1e57ffb6e3 ]

Think about the below scene:

CPU1                                          CPU2
memunmap_pages
  percpu_ref_exit
    __percpu_ref_exit
      free_percpu(percpu_count);
        /* percpu_count is freed here! */
                                              get_dev_pagemap
                                                xa_load(&pgmap_array, PHYS_PFN(phys))
                                                  /* pgmap still in the pgmap_array */
                                                percpu_ref_tryget_live(&pgmap->ref)
                                                  if __ref_is_percpu
                                                    /* __PERCPU_REF_ATOMIC_DEAD not set yet */
                                                    this_cpu_inc(*percpu_count)
                                                    /* access freed percpu_count here! */
      ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
        /* too late... */
  pageunmap_range

To fix the issue, do percpu_ref_exit() after pgmap_array is emptied, so
that we never call percpu_ref_tryget_live() against a percpu_ref that is
being freed.

Link: https://lkml.kernel.org/r/20220609121305.2508-1-linmiaohe@huawei.com
Fixes: b7b3c01b19 ("mm/memremap_pages: support multiple ranges per invocation")
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent 7d4a851fc7
commit cdb2b590c1
1 changed file with 1 addition and 1 deletion
mm/memremap.c
@@ -148,10 +148,10 @@ void memunmap_pages(struct dev_pagemap *pgmap)
 	for_each_device_pfn(pfn, pgmap, i)
 		put_page(pfn_to_page(pfn));
 	wait_for_completion(&pgmap->done);
-	percpu_ref_exit(&pgmap->ref);
 
 	for (i = 0; i < pgmap->nr_range; i++)
 		pageunmap_range(pgmap, i);
+	percpu_ref_exit(&pgmap->ref);
 
 	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
 	devmap_managed_enable_put(pgmap);
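For context, this is how memunmap_pages() reads after the patch. The lines
outside the hunk are reconstructed from nearby kernel releases of
mm/memremap.c and are not part of this diff, so treat them as a hedged
sketch rather than the authoritative source:

void memunmap_pages(struct dev_pagemap *pgmap)
{
	unsigned long pfn;
	int i;

	percpu_ref_kill(&pgmap->ref);		/* new tryget_live() now fails */
	for_each_device_pfn(pfn, pgmap, i)
		put_page(pfn_to_page(pfn));
	wait_for_completion(&pgmap->done);	/* drain references already held */

	for (i = 0; i < pgmap->nr_range; i++)
		pageunmap_range(pgmap, i);	/* empties pgmap_array, with RCU sync */
	percpu_ref_exit(&pgmap->ref);		/* moved here: the array is empty, so
						 * no concurrent get_dev_pagemap()
						 * can reach the ref any more */

	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put(pgmap);
}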