Mirror of https://github.com/Fishwaldo/linux-bl808.git (synced 2025-03-18)
mm: deduplicate cacheline padding code
There are three users (mmzone.h, memcontrol.h, page_counter.h) using similar code for forcing cacheline padding between fields of different structures. Dedup that code.

Link: https://lkml.kernel.org/r/20220826230642.566725-1-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Suggested-by: Feng Tang <feng.tang@intel.com>
Reviewed-by: Feng Tang <feng.tang@intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
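The idiom being deduplicated is a zero-size array wrapped in a struct whose alignment is the internode cacheline size: the member itself occupies no storage, but its alignment forces whatever field is declared after it onto a fresh cacheline. Below is a minimal userspace sketch of the same trick; the 64-byte line size, the struct counter layout, and the plain aligned attribute (standing in for the kernel's ____cacheline_internodealigned_in_smp) are illustrative assumptions, not kernel code.

#include <stdio.h>
#include <stddef.h>

#define CACHE_BYTES 64			/* assumed cacheline size for this demo */

/* Zero-size member; its alignment pushes the *next* field onto a new line. */
struct cacheline_padding {
	char x[0];			/* GNU C zero-length array, as in the kernel */
} __attribute__((aligned(CACHE_BYTES)));

#define CACHELINE_PADDING(name)	struct cacheline_padding name

struct counter {
	long usage;			/* hot, write-intensive field */
	CACHELINE_PADDING(_pad1_);
	long min;			/* read-mostly fields start on a new line */
	long low;
};

int main(void)
{
	printf("usage at %zu, min at %zu, sizeof = %zu\n",
	       offsetof(struct counter, usage),
	       offsetof(struct counter, min),
	       sizeof(struct counter));
	/* prints: usage at 0, min at 64, sizeof = 128 */
	return 0;
}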
parent 974f4367dd
commit e6ad640bc4

4 changed files with 22 additions and 41 deletions
include/linux/cache.h
@@ -85,4 +85,17 @@
 #define cache_line_size()	L1_CACHE_BYTES
 #endif
 
+/*
+ * Helper to add padding within a struct to ensure data fall into separate
+ * cachelines.
+ */
+#if defined(CONFIG_SMP)
+struct cacheline_padding {
+	char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define CACHELINE_PADDING(name)	struct cacheline_padding name
+#else
+#define CACHELINE_PADDING(name)
+#endif
+
 #endif /* __LINUX_CACHE_H */
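One behavior every user now inherits from the shared helper: with CONFIG_SMP disabled, CACHELINE_PADDING(name) expands to nothing, so the padding costs zero bytes on uniprocessor builds (the semicolon left behind at the use site is an empty declaration that GCC accepts). A hypothetical compile-time check of the two resulting layouts, again assuming 64-byte cachelines:

#include <assert.h>			/* static_assert (C11) */

#define CL 64				/* assumed cacheline size */

struct cacheline_padding { char x[0]; } __attribute__((aligned(CL)));

struct smp_shape {			/* what CONFIG_SMP=y produces */
	long hot;
	struct cacheline_padding _pad1_;
	long cold;
};

struct up_shape {			/* CONFIG_SMP=n: padding compiled out */
	long hot;
	long cold;
};

static_assert(sizeof(struct smp_shape) == 2 * CL,
	      "hot and cold occupy separate cachelines");
static_assert(sizeof(struct up_shape) == 2 * sizeof(long),
	      "no size cost when the padding vanishes");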
include/linux/memcontrol.h
@@ -185,15 +185,6 @@ struct mem_cgroup_thresholds {
 	struct mem_cgroup_threshold_ary *spare;
 };
 
-#if defined(CONFIG_SMP)
-struct memcg_padding {
-	char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define MEMCG_PADDING(name)	struct memcg_padding name
-#else
-#define MEMCG_PADDING(name)
-#endif
-
 /*
  * Remember four most recent foreign writebacks with dirty pages in this
  * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
@@ -304,7 +295,7 @@ struct mem_cgroup {
 	spinlock_t move_lock;
 	unsigned long move_lock_flags;
 
-	MEMCG_PADDING(_pad1_);
+	CACHELINE_PADDING(_pad1_);
 
 	/* memory.stat */
 	struct memcg_vmstats vmstats;
@@ -326,7 +317,7 @@ struct mem_cgroup {
 	struct list_head objcg_list;
 #endif
 
-	MEMCG_PADDING(_pad2_);
+	CACHELINE_PADDING(_pad2_);
 
 	/*
 	 * set > 0 if pages under this cgroup are moving to other cgroup.
include/linux/mmzone.h
@@ -121,20 +121,6 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
 
 struct pglist_data;
 
-/*
- * Add a wild amount of padding here to ensure data fall into separate
- * cachelines.  There are very few zone structures in the machine, so space
- * consumption is not a concern here.
- */
-#if defined(CONFIG_SMP)
-struct zone_padding {
-	char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define ZONE_PADDING(name)	struct zone_padding name;
-#else
-#define ZONE_PADDING(name)
-#endif
-
 #ifdef CONFIG_NUMA
 enum numa_stat_item {
 	NUMA_HIT,	/* allocated in intended node */
@@ -837,7 +823,7 @@ struct zone {
 	int initialized;
 
 	/* Write-intensive fields used from the page allocator */
-	ZONE_PADDING(_pad1_)
+	CACHELINE_PADDING(_pad1_);
 
 	/* free areas of different sizes */
 	struct free_area free_area[MAX_ORDER];
@@ -849,7 +835,7 @@ struct zone {
 	spinlock_t lock;
 
 	/* Write-intensive fields used by compaction and vmstats. */
-	ZONE_PADDING(_pad2_)
+	CACHELINE_PADDING(_pad2_);
 
 	/*
 	 * When free pages are below this point, additional steps are taken
@@ -886,7 +872,7 @@ struct zone {
 
 	bool contiguous;
 
-	ZONE_PADDING(_pad3_)
+	CACHELINE_PADDING(_pad3_);
 	/* Zone statistics */
 	atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
 	atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
@@ -1196,7 +1182,7 @@ typedef struct pglist_data {
 #endif /* CONFIG_NUMA */
 
 	/* Write-intensive fields used by page reclaim */
-	ZONE_PADDING(_pad1_)
+	CACHELINE_PADDING(_pad1_);
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 	/*
@@ -1241,7 +1227,7 @@ typedef struct pglist_data {
 	struct lru_gen_mm_walk mm_walk;
 #endif
 
-	ZONE_PADDING(_pad2_)
+	CACHELINE_PADDING(_pad2_);
 
 	/* Per-node vmstats */
 	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
include/linux/page_counter.h
@@ -7,22 +7,13 @@
 #include <linux/kernel.h>
 #include <asm/page.h>
 
-#if defined(CONFIG_SMP)
-struct pc_padding {
-	char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define PC_PADDING(name)	struct pc_padding name
-#else
-#define PC_PADDING(name)
-#endif
-
 struct page_counter {
 	/*
 	 * Make sure 'usage' does not share cacheline with any other field. The
 	 * memcg->memory.usage is a hot member of struct mem_cgroup.
 	 */
 	atomic_long_t usage;
-	PC_PADDING(_pad1_);
+	CACHELINE_PADDING(_pad1_);
 
 	/* effective memory.min and memory.min usage tracking */
 	unsigned long emin;
@@ -38,7 +29,7 @@ struct page_counter {
 	unsigned long failcnt;
 
 	/* Keep all the read most fields in a separete cacheline. */
-	PC_PADDING(_pad2_);
+	CACHELINE_PADDING(_pad2_);
 
 	unsigned long min;
 	unsigned long low;