Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "More mm/ work, plenty more to come

  Subsystems affected by this patch series: slub, memcg, gup, kasan,
  pagealloc, hugetlb, vmscan, tools, mempolicy, memblock, hugetlbfs,
  thp, mmap, kconfig"

* akpm: (131 commits)
  arm64: mm: use ARCH_HAS_DEBUG_WX instead of arch defined
  x86: mm: use ARCH_HAS_DEBUG_WX instead of arch defined
  riscv: support DEBUG_WX
  mm: add DEBUG_WX support
  drivers/base/memory.c: cache memory blocks in xarray to accelerate lookup
  mm/thp: rename pmd_mknotpresent() as pmd_mkinvalid()
  powerpc/mm: drop platform defined pmd_mknotpresent()
  mm: thp: don't need to drain lru cache when splitting and mlocking THP
  hugetlbfs: get unmapped area below TASK_UNMAPPED_BASE for hugetlbfs
  sparc32: register memory occupied by kernel as memblock.memory
  include/linux/memblock.h: fix minor typo and unclear comment
  mm, mempolicy: fix up gup usage in lookup_node
  tools/vm/page_owner_sort.c: filter out unneeded line
  mm: swap: memcg: fix memcg stats for huge pages
  mm: swap: fix vmstats for huge pages
  mm: vmscan: limit the range of LRU type balancing
  mm: vmscan: reclaim writepage is IO cost
  mm: vmscan: determine anon/file pressure balance at the reclaim root
  mm: balance LRU lists based on relative thrashing
  mm: only count actual rotations as LRU reclaim cost
  ...
commit ee01c4d72a
147 changed files with 3456 additions and 2683 deletions
include/linux/mmzone.h

@@ -242,19 +242,6 @@ static inline bool is_active_lru(enum lru_list lru)
 	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
 }
 
-struct zone_reclaim_stat {
-	/*
-	 * The pageout code in vmscan.c keeps track of how many of the
-	 * mem/swap backed and file backed pages are referenced.
-	 * The higher the rotated/scanned ratio, the more valuable
-	 * that cache is.
-	 *
-	 * The anon LRU stats live in [0], file LRU stats in [1]
-	 */
-	unsigned long		recent_rotated[2];
-	unsigned long		recent_scanned[2];
-};
-
 enum lruvec_flags {
 	LRUVEC_CONGESTED,		/* lruvec has many dirty pages
 					 * backed by a congested BDI
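For context on the deletion above: the old scheme valued each LRU by how many of its scanned pages had been referenced (rotated) before reclaim got to them. A minimal sketch of that ratio, with a hypothetical helper name (the real logic lived in get_scan_count() in mm/vmscan.c and was more involved):

/*
 * Illustration only: the higher the rotated/scanned ratio, the more
 * valuable the cache, so it would be scanned less aggressively.
 */
static unsigned long cache_value_pct(unsigned long rotated,
				     unsigned long scanned)
{
	if (!scanned)
		return 0;
	return rotated * 100 / scanned;	/* % of scanned pages reused */
}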
@@ -263,7 +250,13 @@ enum lruvec_flags {
 
 struct lruvec {
 	struct list_head		lists[NR_LRU_LISTS];
-	struct zone_reclaim_stat	reclaim_stat;
+	/*
+	 * These track the cost of reclaiming one LRU - file or anon -
+	 * over the other. As the observed cost of reclaiming one LRU
+	 * increases, the reclaim scan balance tips toward the other.
+	 */
+	unsigned long			anon_cost;
+	unsigned long			file_cost;
 	/* Evictions & activations on the inactive file list */
 	atomic_long_t			inactive_age;
 	/* Refaults at the time of last reclaim cycle */
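The new fields replace that reference ratio with a relative cost model. A hedged sketch of how a scan target might be split in inverse proportion to the two costs (split_scan() is a hypothetical helper; the kernel's actual balancing happens in get_scan_count() and includes more inputs):

/* Illustration only: scan the cheaper LRU harder than the costlier one. */
static void split_scan(unsigned long nr,
		       unsigned long anon_cost, unsigned long file_cost,
		       unsigned long *nr_anon, unsigned long *nr_file)
{
	unsigned long total = anon_cost + file_cost + 1;	/* avoid /0 */

	*nr_anon = nr * file_cost / total;	/* high file cost => scan anon */
	*nr_file = nr * anon_cost / total;	/* high anon cost => scan file */
}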
@@ -680,6 +673,8 @@ typedef struct pglist_data {
 	/*
 	 * Must be held any time you expect node_start_pfn,
 	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
+	 * Also synchronizes pgdat->first_deferred_pfn during deferred page
+	 * init.
 	 *
 	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
 	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
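The pgdat_resize_lock()/pgdat_resize_unlock() helpers named in the comment wrap node_size_lock; a minimal reader sketch (the helpers are real, the surrounding function is made up for illustration):

/* Hypothetical reader: snapshot a node's span under the resize lock. */
static unsigned long read_node_span(pg_data_t *pgdat)
{
	unsigned long flags, spanned;

	pgdat_resize_lock(pgdat, &flags);
	spanned = pgdat->node_spanned_pages;
	pgdat_resize_unlock(pgdat, &flags);
	return spanned;
}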
@@ -699,13 +694,13 @@ typedef struct pglist_data {
 	struct task_struct *kswapd;	/* Protected by
 					   mem_hotplug_begin/end() */
 	int kswapd_order;
-	enum zone_type kswapd_classzone_idx;
+	enum zone_type kswapd_highest_zoneidx;
 
 	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */
 
 #ifdef CONFIG_COMPACTION
 	int kcompactd_max_order;
-	enum zone_type kcompactd_classzone_idx;
+	enum zone_type kcompactd_highest_zoneidx;
 	wait_queue_head_t kcompactd_wait;
 	struct task_struct *kcompactd;
 #endif
@@ -783,15 +778,15 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
 
 void build_all_zonelists(pg_data_t *pgdat);
 void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
-		   enum zone_type classzone_idx);
+		   enum zone_type highest_zoneidx);
 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
-			 int classzone_idx, unsigned int alloc_flags,
+			 int highest_zoneidx, unsigned int alloc_flags,
 			 long free_pages);
 bool zone_watermark_ok(struct zone *z, unsigned int order,
-		       unsigned long mark, int classzone_idx,
+		       unsigned long mark, int highest_zoneidx,
 		       unsigned int alloc_flags);
 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
-			    unsigned long mark, int classzone_idx);
+			    unsigned long mark, int highest_zoneidx);
 enum memmap_context {
 	MEMMAP_EARLY,
 	MEMMAP_HOTPLUG,
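The rename is mechanical: the parameter still names the highest zone the allocation may use, which determines the lowmem reserve applied in the watermark check. A hedged caller sketch against the renamed signature (zone_watermark_ok() and min_wmark_pages() are real kernel interfaces; the wrapper is illustrative):

/* Illustration: can this zone satisfy an order-0 request at its min mark? */
static bool zone_has_room(struct zone *z, int highest_zoneidx)
{
	return zone_watermark_ok(z, 0, min_wmark_pages(z),
				 highest_zoneidx, 0);
}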
@@ -876,7 +871,7 @@ extern int movable_zone;
 #ifdef CONFIG_HIGHMEM
 static inline int zone_movable_is_highmem(void)
 {
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+#ifdef CONFIG_NEED_MULTIPLE_NODES
 	return movable_zone == ZONE_HIGHMEM;
 #else
 	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
@@ -1079,15 +1074,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 #include <asm/sparsemem.h>
 #endif
 
-#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
-	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
-static inline unsigned long early_pfn_to_nid(unsigned long pfn)
-{
-	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
-	return 0;
-}
-#endif
-
 #ifdef CONFIG_FLATMEM
 #define pfn_to_nid(pfn)		(0)
 #endif