mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-04-07 23:14:21 +00:00)
mm: trigger page reclaim in alloc_contig_range() to stabilise watermarks
alloc_contig_range() performs memory allocation, so it should also keep the memory watermarks at their correct levels. This commit adds a call to *_slowpath-style reclaim to grab enough pages so that the final collection of contiguous pages from the freelists will not starve the system.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
CC: Michal Nazarewicz <mina86@mina86.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
This commit is contained in:
parent bba9071087
commit 49f223a9cd

2 changed files with 69 additions and 0 deletions
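For context, a minimal sketch of the kind of caller this patch serves; the helper name cma_alloc_block and its error handling are hypothetical, while alloc_contig_range(), MIGRATE_CMA and pfn_to_page() come from the kernel itself:

/*
 * Hypothetical CMA-style caller (illustration only, not part of this
 * commit). It asks for the PFN range [base_pfn, base_pfn + count);
 * alloc_contig_range() migrates movable pages out of that range and,
 * with this patch, reclaims enough memory up front so that pulling the
 * range off the freelists does not drop the zone below its watermarks.
 */
static struct page *cma_alloc_block(unsigned long base_pfn,
				    unsigned long count)
{
	if (alloc_contig_range(base_pfn, base_pfn + count, MIGRATE_CMA))
		return NULL;		/* range busy or reclaim failed */
	return pfn_to_page(base_pfn);	/* first page of the block */
}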
include/linux/mmzone.h

@@ -63,8 +63,10 @@ enum {
 
 #ifdef CONFIG_CMA
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define cma_wmark_pages(zone) zone->min_cma_pages
 #else
 # define is_migrate_cma(migratetype) false
+# define cma_wmark_pages(zone) 0
 #endif
 
 #define for_each_migratetype_order(order, type) \
@@ -370,6 +372,13 @@ struct zone {
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
+#endif
+#ifdef CONFIG_CMA
+	/*
+	 * CMA needs to increase watermark levels during the allocation
+	 * process to make sure that the system is not starved.
+	 */
+	unsigned long		min_cma_pages;
 #endif
 	struct free_area	free_area[MAX_ORDER];
 
mm/page_alloc.c

@@ -5079,6 +5079,11 @@ static void __setup_per_zone_wmarks(void)
 
 	zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
 	zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+	zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+	zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+	zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
 	setup_zone_migrate_reserve(zone);
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
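To make the watermark bump concrete, a worked example with invented numbers: assume tmp, the zone's computed minimum, is 1024 pages, and a 1024-page contiguous allocation is in flight, so min_cma_pages == 1024:

/*
 * Invented numbers, for illustration only:
 *
 *   WMARK_MIN  = tmp               = 1024;  + cma_wmark_pages(zone) -> 2048
 *   WMARK_LOW  = 1024 + (tmp >> 2) = 1280;  + cma_wmark_pages(zone) -> 2304
 *   WMARK_HIGH = 1024 + (tmp >> 1) = 1536;  + cma_wmark_pages(zone) -> 2560
 *
 * Every watermark is shifted up by the pending CMA page count, so kswapd
 * works to keep that many extra pages free while the allocation proceeds.
 */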
@@ -5684,6 +5689,54 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
 	return ret > 0 ? 0 : ret;
 }
 
+/*
+ * Update zone's cma pages counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&zone->lock, flags);
+	zone->min_cma_pages += count;
+	spin_unlock_irqrestore(&zone->lock, flags);
+	setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger memory pressure bump to reclaim some pages in order to be able to
+ * allocate 'count' pages in single page units. Does similar work as
+ * __alloc_pages_slowpath() function.
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+	int did_some_progress = 0;
+	int order = 1;
+
+	/*
+	 * Increase level of watermarks to force kswapd do his job
+	 * to stabilise at new watermark level.
+	 */
+	__update_cma_watermarks(zone, count);
+
+	/* Obey watermarks as if the page was being allocated */
+	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+		wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+		did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+						      NULL);
+		if (!did_some_progress) {
+			/* Exhausted what can be done so it's blamo time */
+			out_of_memory(zonelist, gfp_mask, order, NULL, false);
+		}
+	}
+
+	/* Restore original watermark levels. */
+	__update_cma_watermarks(zone, -count);
+
+	return count;
+}
+
 /**
  * alloc_contig_range() -- tries to allocate given range of pages
  * @start:	start PFN to allocate
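A note on the loop's exit test above (a simplified reading, not the helper's real body):

/*
 * zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0) asks whether an
 * order-0 allocation could still proceed against the zone's LOW watermark.
 * Because __update_cma_watermarks(zone, count) has just raised every
 * watermark by 'count', the loop keeps waking kswapd and direct-reclaiming
 * until roughly 'count' extra pages are free; the watermarks are then
 * restored with __update_cma_watermarks(zone, -count).
 */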
@@ -5782,6 +5835,13 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		goto done;
 	}
 
+	/*
+	 * Reclaim enough pages to make sure that contiguous allocation
+	 * will not starve the system.
+	 */
+	__reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+
+	/* Grab isolated pages from freelists. */
 	outer_end = isolate_freepages_range(outer_start, end);
 	if (!outer_end) {
 		ret = -EBUSY;
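Finally, a hedged usage sketch pairing the allocation with free_contig_range() from the same CMA series; pfn and nr_pages are placeholders:

/* Illustration only: allocate, use, then release a contiguous PFN range. */
int ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
if (ret == 0) {
	struct page *first = pfn_to_page(pfn);	/* start of the block */
	/* ... use the physically contiguous memory ... */
	free_contig_range(pfn, nr_pages);	/* companion helper */
}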