Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-06-30 19:06:14 +00:00.
writeback: remove wb_list
The wb_list member of struct backing_dev_info always has exactly one element. Just use the direct bdi->wb pointer instead and simplify some code. Also remove bdi_task_init, which is now trivial, to prepare for the next patch. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
This commit is contained in:
parent
4c4762d10f
commit
c1955ce32f
3 changed files with 32 additions and 60 deletions
|
@ -73,9 +73,9 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
|
||||||
* If the default thread isn't there, make sure we add it. When
|
* If the default thread isn't there, make sure we add it. When
|
||||||
* it gets created and wakes up, we'll run this work.
|
* it gets created and wakes up, we'll run this work.
|
||||||
*/
|
*/
|
||||||
if (unlikely(list_empty_careful(&bdi->wb_list)))
|
if (unlikely(!bdi->wb.task)) {
|
||||||
wake_up_process(default_backing_dev_info.wb.task);
|
wake_up_process(default_backing_dev_info.wb.task);
|
||||||
else {
|
} else {
|
||||||
struct bdi_writeback *wb = &bdi->wb;
|
struct bdi_writeback *wb = &bdi->wb;
|
||||||
|
|
||||||
if (wb->task)
|
if (wb->task)
|
||||||
|
|
|
@ -45,8 +45,6 @@ enum bdi_stat_item {
|
||||||
#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
|
#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
|
||||||
|
|
||||||
struct bdi_writeback {
|
struct bdi_writeback {
|
||||||
struct list_head list; /* hangs off the bdi */
|
|
||||||
|
|
||||||
struct backing_dev_info *bdi; /* our parent bdi */
|
struct backing_dev_info *bdi; /* our parent bdi */
|
||||||
unsigned int nr;
|
unsigned int nr;
|
||||||
|
|
||||||
|
@ -80,8 +78,7 @@ struct backing_dev_info {
|
||||||
unsigned int max_ratio, max_prop_frac;
|
unsigned int max_ratio, max_prop_frac;
|
||||||
|
|
||||||
struct bdi_writeback wb; /* default writeback info for this bdi */
|
struct bdi_writeback wb; /* default writeback info for this bdi */
|
||||||
spinlock_t wb_lock; /* protects update side of wb_list */
|
spinlock_t wb_lock; /* protects work_list */
|
||||||
struct list_head wb_list; /* the flusher threads hanging off this bdi */
|
|
||||||
|
|
||||||
struct list_head work_list;
|
struct list_head work_list;
|
||||||
|
|
||||||
|
|
|
@ -65,28 +65,21 @@ static void bdi_debug_init(void)
|
||||||
static int bdi_debug_stats_show(struct seq_file *m, void *v)
|
static int bdi_debug_stats_show(struct seq_file *m, void *v)
|
||||||
{
|
{
|
||||||
struct backing_dev_info *bdi = m->private;
|
struct backing_dev_info *bdi = m->private;
|
||||||
struct bdi_writeback *wb;
|
struct bdi_writeback *wb = &bdi->wb;
|
||||||
unsigned long background_thresh;
|
unsigned long background_thresh;
|
||||||
unsigned long dirty_thresh;
|
unsigned long dirty_thresh;
|
||||||
unsigned long bdi_thresh;
|
unsigned long bdi_thresh;
|
||||||
unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
|
unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
|
||||||
struct inode *inode;
|
struct inode *inode;
|
||||||
|
|
||||||
/*
|
|
||||||
* inode lock is enough here, the bdi->wb_list is protected by
|
|
||||||
* RCU on the reader side
|
|
||||||
*/
|
|
||||||
nr_wb = nr_dirty = nr_io = nr_more_io = 0;
|
nr_wb = nr_dirty = nr_io = nr_more_io = 0;
|
||||||
spin_lock(&inode_lock);
|
spin_lock(&inode_lock);
|
||||||
list_for_each_entry(wb, &bdi->wb_list, list) {
|
|
||||||
nr_wb++;
|
|
||||||
list_for_each_entry(inode, &wb->b_dirty, i_list)
|
list_for_each_entry(inode, &wb->b_dirty, i_list)
|
||||||
nr_dirty++;
|
nr_dirty++;
|
||||||
list_for_each_entry(inode, &wb->b_io, i_list)
|
list_for_each_entry(inode, &wb->b_io, i_list)
|
||||||
nr_io++;
|
nr_io++;
|
||||||
list_for_each_entry(inode, &wb->b_more_io, i_list)
|
list_for_each_entry(inode, &wb->b_more_io, i_list)
|
||||||
nr_more_io++;
|
nr_more_io++;
|
||||||
}
|
|
||||||
spin_unlock(&inode_lock);
|
spin_unlock(&inode_lock);
|
||||||
|
|
||||||
get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
|
get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
|
||||||
|
@ -98,19 +91,16 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
|
||||||
"BdiDirtyThresh: %8lu kB\n"
|
"BdiDirtyThresh: %8lu kB\n"
|
||||||
"DirtyThresh: %8lu kB\n"
|
"DirtyThresh: %8lu kB\n"
|
||||||
"BackgroundThresh: %8lu kB\n"
|
"BackgroundThresh: %8lu kB\n"
|
||||||
"WritebackThreads: %8lu\n"
|
|
||||||
"b_dirty: %8lu\n"
|
"b_dirty: %8lu\n"
|
||||||
"b_io: %8lu\n"
|
"b_io: %8lu\n"
|
||||||
"b_more_io: %8lu\n"
|
"b_more_io: %8lu\n"
|
||||||
"bdi_list: %8u\n"
|
"bdi_list: %8u\n"
|
||||||
"state: %8lx\n"
|
"state: %8lx\n",
|
||||||
"wb_list: %8u\n",
|
|
||||||
(unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
|
(unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
|
||||||
(unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
|
(unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
|
||||||
K(bdi_thresh), K(dirty_thresh),
|
K(bdi_thresh), K(dirty_thresh),
|
||||||
K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
|
K(background_thresh), nr_dirty, nr_io, nr_more_io,
|
||||||
!list_empty(&bdi->bdi_list), bdi->state,
|
!list_empty(&bdi->bdi_list), bdi->state);
|
||||||
!list_empty(&bdi->wb_list));
|
|
||||||
#undef K
|
#undef K
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -270,24 +260,6 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
|
||||||
INIT_LIST_HEAD(&wb->b_more_io);
|
INIT_LIST_HEAD(&wb->b_more_io);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void bdi_task_init(struct backing_dev_info *bdi,
|
|
||||||
struct bdi_writeback *wb)
|
|
||||||
{
|
|
||||||
struct task_struct *tsk = current;
|
|
||||||
|
|
||||||
spin_lock(&bdi->wb_lock);
|
|
||||||
list_add_tail_rcu(&wb->list, &bdi->wb_list);
|
|
||||||
spin_unlock(&bdi->wb_lock);
|
|
||||||
|
|
||||||
tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
|
|
||||||
set_freezable();
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Our parent may run at a different priority, just set us to normal
|
|
||||||
*/
|
|
||||||
set_user_nice(tsk, 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int bdi_start_fn(void *ptr)
|
static int bdi_start_fn(void *ptr)
|
||||||
{
|
{
|
||||||
struct bdi_writeback *wb = ptr;
|
struct bdi_writeback *wb = ptr;
|
||||||
|
@ -301,7 +273,13 @@ static int bdi_start_fn(void *ptr)
|
||||||
list_add_rcu(&bdi->bdi_list, &bdi_list);
|
list_add_rcu(&bdi->bdi_list, &bdi_list);
|
||||||
spin_unlock_bh(&bdi_lock);
|
spin_unlock_bh(&bdi_lock);
|
||||||
|
|
||||||
bdi_task_init(bdi, wb);
|
current->flags |= PF_FLUSHER | PF_SWAPWRITE;
|
||||||
|
set_freezable();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Our parent may run at a different priority, just set us to normal
|
||||||
|
*/
|
||||||
|
set_user_nice(current, 0);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Clear pending bit and wakeup anybody waiting to tear us down
|
* Clear pending bit and wakeup anybody waiting to tear us down
|
||||||
|
@ -312,12 +290,7 @@ static int bdi_start_fn(void *ptr)
|
||||||
|
|
||||||
ret = bdi_writeback_task(wb);
|
ret = bdi_writeback_task(wb);
|
||||||
|
|
||||||
/*
|
wb->task = NULL;
|
||||||
* Remove us from the list
|
|
||||||
*/
|
|
||||||
spin_lock(&bdi->wb_lock);
|
|
||||||
list_del_rcu(&wb->list);
|
|
||||||
spin_unlock(&bdi->wb_lock);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Flush any work that raced with us exiting. No new work
|
* Flush any work that raced with us exiting. No new work
|
||||||
|
@ -326,7 +299,6 @@ static int bdi_start_fn(void *ptr)
|
||||||
if (!list_empty(&bdi->work_list))
|
if (!list_empty(&bdi->work_list))
|
||||||
wb_do_writeback(wb, 1);
|
wb_do_writeback(wb, 1);
|
||||||
|
|
||||||
wb->task = NULL;
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -391,7 +363,13 @@ static int bdi_forker_task(void *ptr)
|
||||||
{
|
{
|
||||||
struct bdi_writeback *me = ptr;
|
struct bdi_writeback *me = ptr;
|
||||||
|
|
||||||
bdi_task_init(me->bdi, me);
|
current->flags |= PF_FLUSHER | PF_SWAPWRITE;
|
||||||
|
set_freezable();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Our parent may run at a different priority, just set us to normal
|
||||||
|
*/
|
||||||
|
set_user_nice(current, 0);
|
||||||
|
|
||||||
for (;;) {
|
for (;;) {
|
||||||
struct backing_dev_info *bdi, *tmp;
|
struct backing_dev_info *bdi, *tmp;
|
||||||
|
@ -598,8 +576,6 @@ EXPORT_SYMBOL(bdi_register_dev);
|
||||||
*/
|
*/
|
||||||
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
|
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
|
||||||
{
|
{
|
||||||
struct bdi_writeback *wb;
|
|
||||||
|
|
||||||
if (!bdi_cap_writeback_dirty(bdi))
|
if (!bdi_cap_writeback_dirty(bdi))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
@ -615,14 +591,14 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
|
||||||
bdi_remove_from_list(bdi);
|
bdi_remove_from_list(bdi);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Finally, kill the kernel threads. We don't need to be RCU
|
* Finally, kill the kernel thread. We don't need to be RCU
|
||||||
* safe anymore, since the bdi is gone from visibility. Force
|
* safe anymore, since the bdi is gone from visibility. Force
|
||||||
* unfreeze of the thread before calling kthread_stop(), otherwise
|
* unfreeze of the thread before calling kthread_stop(), otherwise
|
||||||
* it would never exit if it is currently stuck in the refrigerator.
|
* it would never exit if it is currently stuck in the refrigerator.
|
||||||
*/
|
*/
|
||||||
list_for_each_entry(wb, &bdi->wb_list, list) {
|
if (bdi->wb.task) {
|
||||||
thaw_process(wb->task);
|
thaw_process(bdi->wb.task);
|
||||||
kthread_stop(wb->task);
|
kthread_stop(bdi->wb.task);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -667,7 +643,6 @@ int bdi_init(struct backing_dev_info *bdi)
|
||||||
spin_lock_init(&bdi->wb_lock);
|
spin_lock_init(&bdi->wb_lock);
|
||||||
INIT_RCU_HEAD(&bdi->rcu_head);
|
INIT_RCU_HEAD(&bdi->rcu_head);
|
||||||
INIT_LIST_HEAD(&bdi->bdi_list);
|
INIT_LIST_HEAD(&bdi->bdi_list);
|
||||||
INIT_LIST_HEAD(&bdi->wb_list);
|
|
||||||
INIT_LIST_HEAD(&bdi->work_list);
|
INIT_LIST_HEAD(&bdi->work_list);
|
||||||
|
|
||||||
bdi_wb_init(&bdi->wb, bdi);
|
bdi_wb_init(&bdi->wb, bdi);
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue