mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-10 08:33:14 +00:00
mm/memory_hotplug: optimize memory hotplug
During memory hotplugging we traverse struct pages three times:

1. memset(0) in sparse_add_one_section()
2. loop in __add_section() to do: set_page_node(page, nid); and SetPageReserved(page);
3. loop in memmap_init_zone() to call __init_single_pfn()

This patch removes the first two loops, and leaves only loop 3. All struct pages are initialized in one place, the same as it is done during boot.

The benefits:

- We improve memory hotplug performance because we are not evicting the cache several times and also reduce loop branching overhead.

- Remove condition from hotpath in __init_single_pfn(), that was added in order to fix the problem that was reported by Bharata in the above email thread, thus also improve performance during normal boot.

- Make memory hotplug more similar to the boot memory initialization path because we zero and initialize struct pages only in one function.

- Simplifies memory hotplug struct page initialization code, and thus enables future improvements, such as multi-threading the initialization of struct pages in order to improve hotplug performance even further on larger machines.

[pasha.tatashin@oracle.com: v5]
Link: http://lkml.kernel.org/r/20180228030308.1116-7-pasha.tatashin@oracle.com
Link: http://lkml.kernel.org/r/20180215165920.8570-7-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
fc44f7f923
commit
d0dc12e86b
5 changed files with 28 additions and 38 deletions
|
@ -1143,10 +1143,9 @@ static void free_one_page(struct zone *zone,
|
|||
}
|
||||
|
||||
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
|
||||
unsigned long zone, int nid, bool zero)
|
||||
unsigned long zone, int nid)
|
||||
{
|
||||
if (zero)
|
||||
mm_zero_struct_page(page);
|
||||
mm_zero_struct_page(page);
|
||||
set_page_links(page, zone, nid, pfn);
|
||||
init_page_count(page);
|
||||
page_mapcount_reset(page);
|
||||
|
@ -1160,12 +1159,6 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
|
|||
#endif
|
||||
}
|
||||
|
||||
static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
|
||||
int nid, bool zero)
|
||||
{
|
||||
return __init_single_page(pfn_to_page(pfn), pfn, zone, nid, zero);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
|
||||
static void __meminit init_reserved_page(unsigned long pfn)
|
||||
{
|
||||
|
@ -1184,7 +1177,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
|
|||
if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
|
||||
break;
|
||||
}
|
||||
__init_single_pfn(pfn, zid, nid, true);
|
||||
__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
|
||||
}
|
||||
#else
|
||||
static inline void init_reserved_page(unsigned long pfn)
|
||||
|
@ -1501,7 +1494,7 @@ static unsigned long __init deferred_init_pages(int nid, int zid,
|
|||
} else {
|
||||
page++;
|
||||
}
|
||||
__init_single_page(page, pfn, zid, nid, true);
|
||||
__init_single_page(page, pfn, zid, nid);
|
||||
nr_pages++;
|
||||
}
|
||||
return (nr_pages);
|
||||
|
@ -5434,6 +5427,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
|
|||
pg_data_t *pgdat = NODE_DATA(nid);
|
||||
unsigned long pfn;
|
||||
unsigned long nr_initialised = 0;
|
||||
struct page *page;
|
||||
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
|
||||
struct memblock_region *r = NULL, *tmp;
|
||||
#endif
|
||||
|
@ -5486,6 +5480,11 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
|
|||
#endif
|
||||
|
||||
not_early:
|
||||
page = pfn_to_page(pfn);
|
||||
__init_single_page(page, pfn, zone, nid);
|
||||
if (context == MEMMAP_HOTPLUG)
|
||||
SetPageReserved(page);
|
||||
|
||||
/*
|
||||
* Mark the block movable so that blocks are reserved for
|
||||
* movable at startup. This will force kernel allocations
|
||||
|
@ -5502,15 +5501,8 @@ not_early:
|
|||
* because this is done early in sparse_add_one_section
|
||||
*/
|
||||
if (!(pfn & (pageblock_nr_pages - 1))) {
|
||||
struct page *page = pfn_to_page(pfn);
|
||||
|
||||
__init_single_page(page, pfn, zone, nid,
|
||||
context != MEMMAP_HOTPLUG);
|
||||
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
|
||||
cond_resched();
|
||||
} else {
|
||||
__init_single_pfn(pfn, zone, nid,
|
||||
context != MEMMAP_HOTPLUG);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue