inode: convert inode_stat.nr_unused to per-cpu counters

Before we split up the inode_lru_lock, the unused inode counter
needs to be made independent of the global inode_lru_lock. Convert
it to per-cpu counters to do this.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author:     Dave Chinner <dchinner@redhat.com>
Date:       2011-07-08 14:14:38 +10:00
Committer:  Al Viro <viro@zeniv.linux.org.uk>
Parent:     e9299f5058
Commit:     fcb94f72d3

@@ -95,6 +95,7 @@ EXPORT_SYMBOL(empty_aops);
 struct inodes_stat_t inodes_stat;
 static DEFINE_PER_CPU(unsigned int, nr_inodes);
+static DEFINE_PER_CPU(unsigned int, nr_unused);
 static struct kmem_cache *inode_cachep __read_mostly;
@@ -109,7 +110,11 @@ static int get_nr_inodes(void)
 static inline int get_nr_inodes_unused(void)
 {
-	return inodes_stat.nr_unused;
+	int i;
+	int sum = 0;
+	for_each_possible_cpu(i)
+		sum += per_cpu(nr_unused, i);
+	return sum < 0 ? 0 : sum;
 }
 
 int get_nr_dirty_inodes(void)
@@ -127,6 +132,7 @@ int proc_nr_inodes(ctl_table *table, int write,
 		   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	inodes_stat.nr_inodes = get_nr_inodes();
+	inodes_stat.nr_unused = get_nr_inodes_unused();
 	return proc_dointvec(table, write, buffer, lenp, ppos);
 }
 #endif
@@ -340,7 +346,7 @@ static void inode_lru_list_add(struct inode *inode)
 	spin_lock(&inode_lru_lock);
 	if (list_empty(&inode->i_lru)) {
 		list_add(&inode->i_lru, &inode_lru);
-		inodes_stat.nr_unused++;
+		this_cpu_inc(nr_unused);
 	}
 	spin_unlock(&inode_lru_lock);
 }
@@ -350,7 +356,7 @@ static void inode_lru_list_del(struct inode *inode)
 	spin_lock(&inode_lru_lock);
 	if (!list_empty(&inode->i_lru)) {
 		list_del_init(&inode->i_lru);
-		inodes_stat.nr_unused--;
+		this_cpu_dec(nr_unused);
 	}
 	spin_unlock(&inode_lru_lock);
 }
@@ -656,7 +662,7 @@ static void prune_icache(int nr_to_scan)
 		    (inode->i_state & ~I_REFERENCED)) {
 			list_del_init(&inode->i_lru);
 			spin_unlock(&inode->i_lock);
-			inodes_stat.nr_unused--;
+			this_cpu_dec(nr_unused);
 			continue;
 		}
@@ -693,7 +699,7 @@ static void prune_icache(int nr_to_scan)
 		spin_unlock(&inode->i_lock);
 		list_move(&inode->i_lru, &freeable);
-		inodes_stat.nr_unused--;
+		this_cpu_dec(nr_unused);
 	}
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_INODESTEAL, reap);