[PATCH] reduce MAX_NR_ZONES: use enum to define zones, reformat and comment
Use enum for zones and reformat zone-dependent information. Add comments explaining the use of zones and add a zones_t type for zone numbers. Line up information that will be #ifdef'd by the following patches.

[akpm@osdl.org: comment cleanups]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 98d2b0ebda
commit 2f1b624868
3 changed files with 69 additions and 28 deletions
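The point of the change is that MAX_NR_ZONES is now derived from the list of zones itself rather than kept in sync by hand (the old "/* Sync this with ZONES_SHIFT */" note). A minimal standalone sketch of that pattern, in plain userspace C rather than kernel code:

#include <stdio.h>

/*
 * Illustrative only: a trimmed-down enum in the style this patch
 * introduces. With the old #define scheme, MAX_NR_ZONES had to be
 * updated by hand; as the final enumerator it tracks the zone count
 * automatically when zones are added or removed.
 */
enum zone_type {
	ZONE_DMA,
	ZONE_DMA32,
	ZONE_NORMAL,
	ZONE_HIGHMEM,
	MAX_NR_ZONES		/* always one past the last real zone */
};

int main(void)
{
	enum zone_type zone;

	/* Iterate over every zone without a hand-maintained count. */
	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		printf("zone index %d\n", zone);
	return 0;
}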
include/linux/mm.h

@@ -470,7 +470,7 @@ void split_page(struct page *page, unsigned int order);
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)
 
-static inline unsigned long page_zonenum(struct page *page)
+static inline enum zone_type page_zonenum(struct page *page)
 {
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
@@ -499,11 +499,12 @@ static inline unsigned long page_to_section(struct page *page)
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
 
-static inline void set_page_zone(struct page *page, unsigned long zone)
+static inline void set_page_zone(struct page *page, enum zone_type zone)
 {
 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 }
+
 static inline void set_page_node(struct page *page, unsigned long node)
 {
 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
@@ -515,7 +516,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline void set_page_links(struct page *page, unsigned long zone,
+static inline void set_page_links(struct page *page, enum zone_type zone,
 	unsigned long node, unsigned long pfn)
 {
 	set_page_zone(page, zone);
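The page_zonenum()/set_page_zone() pair above encodes the zone number into a small bitfield of page->flags. A self-contained sketch of that mask-and-shift technique, using made-up shift and width values in place of the kernel's config-dependent ZONES_PGSHIFT and ZONES_MASK:

#include <stdio.h>

/*
 * Illustrative only: hypothetical values standing in for the kernel's
 * config-dependent constants. The technique is the one the mm.h
 * helpers use: each field is masked off and OR-ed into its own bit
 * range of a single flags word.
 */
#define ZONES_SHIFT	2
#define ZONES_PGSHIFT	0
#define ZONES_MASK	((1UL << ZONES_SHIFT) - 1)

static unsigned long flags;

static void set_zone(unsigned long zone)
{
	flags &= ~(ZONES_MASK << ZONES_PGSHIFT);	/* clear old zone bits */
	flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;	/* store new zone */
}

static unsigned long get_zone(void)
{
	return (flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

int main(void)
{
	set_zone(3);				/* e.g. ZONE_HIGHMEM */
	printf("zone = %lu\n", get_zone());	/* prints 3 */
	return 0;
}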
include/linux/mmzone.h

@@ -88,14 +88,53 @@ struct per_cpu_pageset {
 #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
 #endif
 
-#define ZONE_DMA		0
-#define ZONE_DMA32		1
-#define ZONE_NORMAL		2
-#define ZONE_HIGHMEM		3
+enum zone_type {
+	/*
+	 * ZONE_DMA is used when there are devices that are not able
+	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
+	 * carve out the portion of memory that is needed for these devices.
+	 * The range is arch specific.
+	 *
+	 * Some examples
+	 *
+	 * Architecture		Limit
+	 * ---------------------------
+	 * parisc, ia64, sparc	<4G
+	 * s390			<2G
+	 * arm26		<48M
+	 * arm			Various
+	 * alpha		Unlimited or 0-16MB.
+	 *
+	 * i386, x86_64 and multiple other arches
+	 *			<16M.
+	 */
+	ZONE_DMA,
+	/*
+	 * x86_64 needs two ZONE_DMAs because it supports devices that are
+	 * only able to do DMA to the lower 16M but also 32 bit devices that
+	 * can only do DMA areas below 4G.
+	 */
+	ZONE_DMA32,
+	/*
+	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
+	 * performed on pages in ZONE_NORMAL if the DMA devices support
+	 * transfers to all addressable memory.
+	 */
+	ZONE_NORMAL,
+	/*
+	 * A memory area that is only addressable by the kernel through
+	 * mapping portions into its own address space. This is for example
+	 * used by i386 to allow the kernel to address the memory beyond
+	 * 900MB. The kernel will set up special mappings (page
+	 * table entries on i386) for each page that the kernel needs to
+	 * access.
+	 */
+	ZONE_HIGHMEM,
 
-#define MAX_NR_ZONES		4	/* Sync this with ZONES_SHIFT */
-#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
+	MAX_NR_ZONES
+};
+
+#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
 
 /*
  * When a memory allocation must conform to specific limitations (such
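ZONES_SHIFT must stay at ceil(log2(MAX_NR_ZONES)) so the zone number still fits in its page->flags bitfield; with four zones, 2 bits suffice. A sketch of a compile-time guard for that invariant; BUILD_CHECK here is a hypothetical macro for illustration, not something this patch adds:

/*
 * Illustrative only: fails to compile (negative array size) if
 * ZONES_SHIFT is too small for MAX_NR_ZONES.
 */
#define MAX_NR_ZONES	4
#define ZONES_SHIFT	2	/* must satisfy (1 << ZONES_SHIFT) >= MAX_NR_ZONES */

#define BUILD_CHECK(cond) typedef char build_check[(cond) ? 1 : -1]

BUILD_CHECK((1 << ZONES_SHIFT) >= MAX_NR_ZONES);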
@@ -126,16 +165,6 @@ struct per_cpu_pageset {
 /* #define GFP_ZONETYPES	(GFP_ZONEMASK + 1) */		/* Non-loner */
 #define GFP_ZONETYPES	((GFP_ZONEMASK + 1) / 2 + 1)		/* Loner */
 
-/*
- * On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a 32bit PC we have 4 zones:
- *
- * ZONE_DMA	  < 16 MB	ISA DMA capable memory
- * ZONE_DMA32	     0 MB	Empty
- * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
- * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
- */
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 	unsigned long		free_pages;
@@ -266,7 +295,6 @@ struct zone {
 	char			*name;
 } ____cacheline_internodealigned_in_smp;
 
-
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -373,12 +401,12 @@ static inline int populated_zone(struct zone *zone)
 	return (!!zone->present_pages);
 }
 
-static inline int is_highmem_idx(int idx)
+static inline int is_highmem_idx(enum zone_type idx)
 {
 	return (idx == ZONE_HIGHMEM);
 }
 
-static inline int is_normal_idx(int idx)
+static inline int is_normal_idx(enum zone_type idx)
 {
 	return (idx == ZONE_NORMAL);
 }
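With enum zone_type in the signatures, callers of is_highmem_idx()/is_normal_idx() pass a value whose valid range is visible in the type rather than a bare int. A small userspace sketch that recreates the helpers outside the kernel so the enum-typed interface can be exercised directly:

#include <stdio.h>

/* Illustrative only: trimmed-down copies of the mmzone.h helpers. */
enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

static int is_highmem_idx(enum zone_type idx)
{
	return (idx == ZONE_HIGHMEM);
}

static int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

int main(void)
{
	enum zone_type idx;

	/* The enum makes the valid index range explicit at every call site. */
	for (idx = 0; idx < MAX_NR_ZONES; idx++)
		printf("idx %d: highmem=%d normal=%d\n",
		       idx, is_highmem_idx(idx), is_normal_idx(idx));
	return 0;
}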