[PATCH] Add __GFP_THISNODE to avoid fallback to other nodes and ignore cpuset/memory policy restrictions
Add a new gfp flag __GFP_THISNODE to avoid fallback to other nodes. This
flag is essential if a kernel component requires memory to be located on
a certain node. It will be needed for alloc_pages_node() to force
allocation on the indicated node and for alloc_pages() to force allocation
on the current node.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 056c62418c
commit 9b819d204c
4 changed files with 7 additions and 3 deletions
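As a rough illustration of how the new flag is intended to be used (this helper is a sketch, not part of the patch): a caller that needs memory on a specific node passes __GFP_THISNODE along with its usual gfp mask and treats a NULL return as "no memory on that node" rather than expecting a fallback allocation.

/* Sketch only, not in this commit: allocate one page strictly on @nid.
 * With __GFP_THISNODE the allocator does not fall back to other nodes
 * and cpuset/mempolicy restrictions are ignored, so a NULL return means
 * the target node itself has no suitable pages.
 */
static struct page *alloc_page_on_node(int nid)
{
	return alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);
}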
include/linux/gfp.h
@@ -45,6 +45,7 @@ struct vm_area_struct;
 #define __GFP_ZERO	((__force gfp_t)0x8000u)/* Return zeroed page on success */
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
+#define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
 
 #define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
@@ -53,7 +54,7 @@ struct vm_area_struct;
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
 			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
 			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
-			__GFP_NOMEMALLOC|__GFP_HARDWALL)
+			__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE)
 
 /* This equals 0, but use constants in case they ever change */
 #define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
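A quick arithmetic check on the gfp.h side (illustrative only, not part of the patch): 0x40000 is 1 << 18, so the new flag occupies bit 18 and still fits under __GFP_BITS_SHIFT (20), which is why the shift and __GFP_BITS_MASK did not need to change. A compile-time assertion along these lines would confirm it:

/* Illustrative check, not in the patch: the new flag must lie inside the
 * 20-bit gfp mask; 0x40000 == 1 << 18 < 1 << 20 == __GFP_BITS_MASK + 1.
 */
BUILD_BUG_ON(__GFP_THISNODE & ~__GFP_BITS_MASK);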
kernel/cpuset.c
@@ -2316,7 +2316,7 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
 	const struct cpuset *cs;	/* current cpuset ancestors */
 	int allowed;			/* is allocation in zone z allowed? */
 
-	if (in_interrupt())
+	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
 	node = z->zone_pgdat->node_id;
 	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
mm/mempolicy.c
@@ -1290,7 +1290,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 
 	if ((gfp & __GFP_WAIT) && !in_interrupt())
 		cpuset_update_task_memory_state();
-	if (!pol || in_interrupt())
+	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
 		pol = &default_policy;
 	if (pol->policy == MPOL_INTERLEAVE)
 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
mm/page_alloc.c
@@ -893,6 +893,9 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 	 */
 	do {
+		if (unlikely((gfp_mask & __GFP_THISNODE) &&
+			(*z)->zone_pgdat != zonelist->zones[0]->zone_pgdat))
+				break;
 		if ((alloc_flags & ALLOC_CPUSET) &&
 			!cpuset_zone_allowed(*z, gfp_mask))
 			continue;
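The page_alloc.c hunk relies on the zonelist placing the target node's own zones at the front, so comparing each zone's pgdat with zonelist->zones[0]->zone_pgdat detects the first off-node zone and stops the scan there instead of falling back. A simplified stand-alone sketch of that walk follows; the toy type and the free-page test are illustrative stand-ins, not the kernel's real structures.

/* Toy model of the __GFP_THISNODE walk in get_page_from_freelist():
 * zones are ordered with the target node's zones first, and the scan
 * stops at the first zone belonging to a different node rather than
 * falling back to it.  Types and fields here are stand-ins.
 */
struct toy_zone {
	int node_id;
	long free_pages;
};

static struct toy_zone *thisnode_pick(struct toy_zone **zones, int nr)
{
	int i, target = zones[0]->node_id;

	for (i = 0; i < nr; i++) {
		if (zones[i]->node_id != target)
			break;			/* no fallback to other nodes */
		if (zones[i]->free_pages > 0)
			return zones[i];	/* allocate from this zone */
	}
	return NULL;				/* target node is out of memory */
}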