mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-05 13:51:52 +00:00
mm/sparsemem: convert kmalloc_section_memmap() to populate_section_memmap()
Allow sub-section sized ranges to be added to the memmap. populate_section_memmap() takes an explicit pfn range rather than assuming a full section, and those parameters are plumbed all the way through to vmemmap_populate(). There should be no sub-section usage in current deployments. New warnings are added to clarify which memmap allocation paths are sub-section capable. Link: http://lkml.kernel.org/r/156092352058.979959.6551283472062305149.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams <dan.j.williams@intel.com> Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com> Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> [ppc64] Reviewed-by: Oscar Salvador <osalvador@suse.de> Cc: Michal Hocko <mhocko@suse.com> Cc: David Hildenbrand <david@redhat.com> Cc: Logan Gunthorpe <logang@deltatee.com> Cc: Jane Chu <jane.chu@oracle.com> Cc: Jeff Moyer <jmoyer@redhat.com> Cc: Jérôme Glisse <jglisse@redhat.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Mike Rapoport <rppt@linux.ibm.com> Cc: Toshi Kani <toshi.kani@hpe.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Wei Yang <richardw.yang@linux.intel.com> Cc: Jason Gunthorpe <jgg@mellanox.com> Cc: Christoph Hellwig <hch@lst.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
49ba3c6b37
commit
e9c0a3f054
4 changed files with 46 additions and 33 deletions
|
@ -245,19 +245,26 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
|
||||
struct vmem_altmap *altmap)
|
||||
struct page * __meminit __populate_section_memmap(unsigned long pfn,
|
||||
unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
|
||||
{
|
||||
unsigned long start;
|
||||
unsigned long end;
|
||||
struct page *map;
|
||||
|
||||
map = pfn_to_page(pnum * PAGES_PER_SECTION);
|
||||
start = (unsigned long)map;
|
||||
end = (unsigned long)(map + PAGES_PER_SECTION);
|
||||
/*
|
||||
* The minimum granularity of memmap extensions is
|
||||
* PAGES_PER_SUBSECTION as allocations are tracked in the
|
||||
* 'subsection_map' bitmap of the section.
|
||||
*/
|
||||
end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);
|
||||
pfn &= PAGE_SUBSECTION_MASK;
|
||||
nr_pages = end - pfn;
|
||||
|
||||
start = (unsigned long) pfn_to_page(pfn);
|
||||
end = start + nr_pages * sizeof(struct page);
|
||||
|
||||
if (vmemmap_populate(start, end, nid, altmap))
|
||||
return NULL;
|
||||
|
||||
return map;
|
||||
return pfn_to_page(pfn);
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue