diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 1198064f80eb..27f001e0f0a2 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -264,7 +264,7 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
  * How many struct page structs need to be reset. When we reuse the head
  * struct page, the special metadata (e.g. page->flags or page->mapping)
  * cannot copy to the tail struct page structs. The invalid value will be
- * checked in the free_tail_pages_check(). In order to avoid the message
+ * checked in the free_tail_page_prepare(). In order to avoid the message
  * of "corrupted mapping in tail page". We need to reset at least 3 (one
  * head struct page struct and two tail struct page structs) struct page
  * structs.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9c325e5e6b15..6da423ec356f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1131,7 +1131,7 @@ static inline bool free_page_is_bad(struct page *page)
 	return true;
 }
 
-static int free_tail_pages_check(struct page *head_page, struct page *page)
+static int free_tail_page_prepare(struct page *head_page, struct page *page)
 {
 	struct folio *folio = (struct folio *)head_page;
 	int ret = 1;
@@ -1142,7 +1142,7 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 	 */
 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
 
-	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
+	if (!static_branch_unlikely(&check_pages_enabled)) {
 		ret = 0;
 		goto out;
 	}
@@ -1276,9 +1276,9 @@ static __always_inline bool free_pages_prepare(struct page *page,
 			ClearPageHasHWPoisoned(page);
 		for (i = 1; i < (1 << order); i++) {
 			if (compound)
-				bad += free_tail_pages_check(page, page + i);
+				bad += free_tail_page_prepare(page, page + i);
 			if (is_check_pages_enabled()) {
-				if (unlikely(free_page_is_bad(page + i))) {
+				if (free_page_is_bad(page + i)) {
 					bad++;
 					continue;
 				}
@@ -1627,7 +1627,7 @@ static inline bool check_new_pages(struct page *page, unsigned int order)
 		for (int i = 0; i < (1 << order); i++) {
 			struct page *p = page + i;
 
-			if (unlikely(check_new_page(p)))
+			if (check_new_page(p))
 				return true;
 		}
 	}
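
For readers unfamiliar with the check_pages_enabled gate used above: it is a static key, so the extra page sanity checks cost essentially nothing until the key is switched on at runtime, and the checks already sit behind a branch the key treats as unlikely, which presumably is why the explicit unlikely() hints are dropped. The following is only a minimal sketch of the generic <linux/jump_label.h> pattern, not part of the patch; the example_* identifiers are made up for illustration.

/*
 * Sketch of the static-key pattern assumed above. DEFINE_STATIC_KEY_FALSE()
 * creates a key that defaults to off, static_branch_unlikely() compiles to a
 * patchable branch that is a no-op while the key is off, and
 * static_branch_enable() flips it at runtime.
 */
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(example_checks_enabled);

static inline bool example_checks_wanted(void)
{
	/* Near-free when the key is off: the branch is patched out. */
	return static_branch_unlikely(&example_checks_enabled);
}

static void example_checks_init(bool debug_requested)
{
	/* Turn the expensive checks on only when explicitly requested. */
	if (debug_requested)
		static_branch_enable(&example_checks_enabled);
}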