mm: optimize put_mems_allowed() usage
Since put_mems_allowed() is strictly optional (it is a seqcount retry), we don't need to evaluate the function if the allocation was in fact successful, saving an smp_rmb, some loads, and comparisons on some relatively fast paths.

Since the naming get/put_mems_allowed() does suggest a mandatory pairing, rename the interface, as suggested by Mel, to resemble the seqcount interface.

This gives us read_mems_allowed_begin() and read_mems_allowed_retry(); note that the return value of the latter call is inverted from its previous incarnation.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
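A minimal sketch of the caller pattern the renamed interface encourages; try_alloc_page() is a hypothetical stand-in for the real allocation calls updated elsewhere in this commit, not code from this diff:

	/* Hypothetical caller: take a cookie, attempt the allocation, and
	 * re-check the seqcount only if the attempt failed, in case a
	 * concurrent cpuset update explains the failure. */
	struct page *page;
	unsigned int cpuset_mems_cookie;

	do {
		cpuset_mems_cookie = read_mems_allowed_begin();
		page = try_alloc_page();	/* stand-in allocation call */
	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

On success the && short-circuits, so read_mems_allowed_retry(), and with it the smp_rmb() inside read_seqcount_retry(), is never evaluated; that is the saving described in the first paragraph.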
parent 91ca918648
commit d26914d117
8 changed files with 38 additions and 39 deletions
@@ -87,25 +87,26 @@ extern void rebuild_sched_domains(void);
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
 /*
- * get_mems_allowed is required when making decisions involving mems_allowed
- * such as during page allocation. mems_allowed can be updated in parallel
- * and depending on the new value an operation can fail potentially causing
- * process failure. A retry loop with get_mems_allowed and put_mems_allowed
- * prevents these artificial failures.
+ * read_mems_allowed_begin is required when making decisions involving
+ * mems_allowed such as during page allocation. mems_allowed can be updated in
+ * parallel and depending on the new value an operation can fail potentially
+ * causing process failure. A retry loop with read_mems_allowed_begin and
+ * read_mems_allowed_retry prevents these artificial failures.
  */
-static inline unsigned int get_mems_allowed(void)
+static inline unsigned int read_mems_allowed_begin(void)
 {
 	return read_seqcount_begin(&current->mems_allowed_seq);
 }
 
 /*
- * If this returns false, the operation that took place after get_mems_allowed
- * may have failed. It is up to the caller to retry the operation if
+ * If this returns true, the operation that took place after
+ * read_mems_allowed_begin may have failed artificially due to a concurrent
+ * update of mems_allowed. It is up to the caller to retry the operation if
  * appropriate.
  */
-static inline bool put_mems_allowed(unsigned int seq)
+static inline bool read_mems_allowed_retry(unsigned int seq)
 {
-	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
+	return read_seqcount_retry(&current->mems_allowed_seq, seq);
 }
 
 static inline void set_mems_allowed(nodemask_t nodemask)

@@ -225,14 +226,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 {
 }
 
-static inline unsigned int get_mems_allowed(void)
+static inline unsigned int read_mems_allowed_begin(void)
 {
 	return 0;
 }
 
-static inline bool put_mems_allowed(unsigned int seq)
+static inline bool read_mems_allowed_retry(unsigned int seq)
 {
-	return true;
+	return false;
 }
 
 #endif /* !CONFIG_CPUSETS */
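For contrast, a sketch of the same hypothetical call site under the old naming; note that put_mems_allowed() returned true when the read was stable, so callers tested its negation, and the seqcount check here runs before the page check rather than being skipped on success:

	/* Hypothetical pre-rename caller, for comparison with the
	 * read_mems_allowed_begin() sketch above. */
	do {
		cpuset_mems_cookie = get_mems_allowed();
		page = try_alloc_page();	/* stand-in allocation call */
	} while (!put_mems_allowed(cpuset_mems_cookie) && !page);

The !CONFIG_CPUSETS stubs in the second hunk flip the same way: put_mems_allowed() hard-wired to true meant "stable, never retry", and read_mems_allowed_retry() hard-wired to false preserves that behavior under the inverted semantics, so kernels built without cpusets never loop.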