It has been hard to analyze when and why compaction starts or finishes. With these new tracepoints, we can learn much more about the reasons compaction starts and finishes. I found the following bug with these tracepoints: http://www.spinics.net/lists/linux-mm/msg81582.html

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
158 lines
4.8 KiB
C
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was deferred due to past failures */
#define COMPACT_DEFERRED 0
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED 1
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE 2
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL 3
/* The full zone was compacted */
#define COMPACT_COMPLETE 4
/* For more detailed tracepoint output */
#define COMPACT_NO_SUITABLE_PAGE 5
#define COMPACT_NOT_SUITABLE_ZONE 6
/* When adding new state, please change compaction_status_string, too */
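/*
 * For tracepoint output these states are mapped to human-readable names
 * by an array in mm/compaction.c; a sketch of what that table is assumed
 * to look like (entry order must track the defines above):
 *
 *	static const char * const compaction_status_string[] = {
 *		"deferred", "skipped", "continue", "partial",
 *		"complete", "no_suitable_page", "not_suitable_zone",
 *	};
 */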

/* Used to signal whether compaction detected need_sched() or lock contention */
/* No contention detected */
#define COMPACT_CONTENDED_NONE 0
/* Either need_sched() was true or fatal signal pending */
#define COMPACT_CONTENDED_SCHED 1
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK 2
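/*
 * compact_zone() is expected to report one of these values back to the
 * page allocator through the int *contended out-parameter of
 * try_to_compact_pages() declared below.
 */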

struct alloc_context; /* in mm/internal.h */

#ifdef CONFIG_COMPACTION
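/*
 * Sysctl plumbing: writing to /proc/sys/vm/compact_memory triggers
 * compaction of all zones, and /proc/sys/vm/extfrag_threshold tunes when
 * compaction is preferred over direct reclaim.
 */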
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
			int alloc_flags, const struct alloc_context *ac,
			enum migrate_mode mode, int *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order,
			int alloc_flags, int classzone_idx);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}
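/*
 * Worked example: after three straight failures compact_defer_shift is 3,
 * so defer_limit is 8; compaction_deferred() then returns true for the
 * next seven calls and lets the eighth attempt compaction again.
 */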

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
static inline void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;
}

/* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
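/*
 * Putting the helpers together, a direct-compaction caller follows
 * roughly this pattern (illustrative sketch only; allocation_succeeded is
 * a stand-in, and the real call sites live in mm/page_alloc.c):
 *
 *	if (compaction_deferred(zone, order))
 *		return COMPACT_DEFERRED;
 *	ret = compact_zone_order(...);
 *	if (allocation_succeeded)
 *		compaction_defer_reset(zone, order, true);
 *	else if (ret == COMPACT_COMPLETE)
 *		defer_compaction(zone, order);
 */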

#else
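/*
 * With CONFIG_COMPACTION disabled, these stubs make callers behave as if
 * compaction were permanently unavailable: attempts are always reported
 * as deferred and zones are never considered suitable.
 */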
static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
			unsigned int order, int alloc_flags,
			const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline unsigned long compaction_suitable(struct zone *zone, int order,
			int alloc_flags, int classzone_idx)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */