mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-03-17 20:54:10 +00:00
huge_memory: convert unmap_page() to unmap_folio()
Remove a folio->page->folio conversion. Link: https://lkml.kernel.org/r/20220902194653.1739778-54-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
3e9a13daa6
commit
684555aacc
1 changed file with 6 additions and 7 deletions
|
@ -2355,13 +2355,12 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
|
|||
}
|
||||
}
|
||||
|
||||
static void unmap_page(struct page *page)
|
||||
static void unmap_folio(struct folio *folio)
|
||||
{
|
||||
struct folio *folio = page_folio(page);
|
||||
enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
|
||||
TTU_SYNC;
|
||||
|
||||
VM_BUG_ON_PAGE(!PageHead(page), page);
|
||||
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
|
||||
|
||||
/*
|
||||
* Anon pages need migration entries to preserve them, but file
|
||||
|
@ -2378,7 +2377,7 @@ static void remap_page(struct folio *folio, unsigned long nr)
|
|||
{
|
||||
int i = 0;
|
||||
|
||||
/* If unmap_page() uses try_to_migrate() on file, remove this check */
|
||||
/* If unmap_folio() uses try_to_migrate() on file, remove this check */
|
||||
if (!folio_test_anon(folio))
|
||||
return;
|
||||
for (;;) {
|
||||
|
@ -2428,7 +2427,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
|
|||
* for example lock_page() which set PG_waiters.
|
||||
*
|
||||
* Note that for mapped sub-pages of an anonymous THP,
|
||||
* PG_anon_exclusive has been cleared in unmap_page() and is stored in
|
||||
* PG_anon_exclusive has been cleared in unmap_folio() and is stored in
|
||||
* the migration entry instead from where remap_page() will restore it.
|
||||
* We can still have PG_anon_exclusive set on effectively unmapped and
|
||||
* unreferenced sub-pages of an anonymous THP: we can simply drop
|
||||
|
@ -2700,7 +2699,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
|
|||
}
|
||||
|
||||
/*
|
||||
* Racy check if we can split the page, before unmap_page() will
|
||||
* Racy check if we can split the page, before unmap_folio() will
|
||||
* split PMDs
|
||||
*/
|
||||
if (!can_split_folio(folio, &extra_pins)) {
|
||||
|
@ -2708,7 +2707,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
|
|||
goto out_unlock;
|
||||
}
|
||||
|
||||
unmap_page(&folio->page);
|
||||
unmap_folio(folio);
|
||||
|
||||
/* block interrupt reentry in xa_lock and spinlock */
|
||||
local_irq_disable();
|
||||
|
|
Loading…
Add table
Reference in a new issue