writeback: remove bdi_start_writeback()
bdi_start_writeback() is a thin wrapper on top of __wb_start_writeback() and is used only by laptop_mode_timer_fn(). This patch removes bdi_start_writeback(), renames __wb_start_writeback() to wb_start_writeback(), and makes laptop_mode_timer_fn() use it instead.

This doesn't cause any functional difference and will ease making laptop_mode_timer_fn() cgroup-writeback aware.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent ebe41ab0c7
commit c00ddad39f
3 changed files with 29 additions and 47 deletions
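Only the fs/fs-writeback.c hunks are rendered below. For the laptop_mode_timer_fn() conversion the message describes, here is a minimal sketch of the updated caller in mm/page-writeback.c, reconstructed from the new wb_start_writeback() signature rather than copied from the commit; the surrounding body of laptop_mode_timer_fn() is an assumption and may differ in detail:

/*
 * Sketch of the remaining caller after the rename (mm/page-writeback.c).
 * The removed bdi_start_writeback() wrapper passed range_cyclic=true on
 * the caller's behalf, so the converted call site passes true explicitly
 * and reaches through the bdi to its embedded bdi_writeback.
 */
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages = global_page_state(NR_FILE_DIRTY) +
		       global_page_state(NR_UNSTABLE_NFS);

	/*
	 * We want to write everything out, not just down to the dirty
	 * threshold (assumed pre-existing body, unchanged by this patch).
	 */
	if (bdi_has_dirty_io(&q->backing_dev_info))
		wb_start_writeback(&q->backing_dev_info.wb, nr_pages, true,
				   WB_REASON_LAPTOP_TIMER);
}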
fs/fs-writeback.c

@@ -184,33 +184,6 @@ out_unlock:
 	spin_unlock_bh(&wb->work_lock);
 }
 
-static void __wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
-				 bool range_cyclic, enum wb_reason reason)
-{
-	struct wb_writeback_work *work;
-
-	if (!wb_has_dirty_io(wb))
-		return;
-
-	/*
-	 * This is WB_SYNC_NONE writeback, so if allocation fails just
-	 * wakeup the thread for old dirty data writeback
-	 */
-	work = kzalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work) {
-		trace_writeback_nowork(wb->bdi);
-		wb_wakeup(wb);
-		return;
-	}
-
-	work->sync_mode = WB_SYNC_NONE;
-	work->nr_pages = nr_pages;
-	work->range_cyclic = range_cyclic;
-	work->reason = reason;
-
-	wb_queue_work(wb, work);
-}
-
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 /**
@@ -240,22 +213,31 @@ EXPORT_SYMBOL_GPL(inode_congested);
 
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
-/**
- * bdi_start_writeback - start writeback
- * @bdi: the backing device to write from
- * @nr_pages: the number of pages to write
- * @reason: reason why some writeback work was initiated
- *
- * Description:
- *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
- *   started when this function returns, we make no guarantees on
- *   completion. Caller need not hold sb s_umount semaphore.
- *
- */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-			 enum wb_reason reason)
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+			bool range_cyclic, enum wb_reason reason)
 {
-	__wb_start_writeback(&bdi->wb, nr_pages, true, reason);
+	struct wb_writeback_work *work;
+
+	if (!wb_has_dirty_io(wb))
+		return;
+
+	/*
+	 * This is WB_SYNC_NONE writeback, so if allocation fails just
+	 * wakeup the thread for old dirty data writeback
+	 */
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		trace_writeback_nowork(wb->bdi);
+		wb_wakeup(wb);
+		return;
+	}
+
+	work->sync_mode = WB_SYNC_NONE;
+	work->nr_pages = nr_pages;
+	work->range_cyclic = range_cyclic;
+	work->reason = reason;
+
+	wb_queue_work(wb, work);
 }
 
 /**
@@ -1219,7 +1201,7 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
-		__wb_start_writeback(&bdi->wb, nr_pages, false, reason);
+		wb_start_writeback(&bdi->wb, nr_pages, false, reason);
 	rcu_read_unlock();
 }
 
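Of the three changed files, only fs/fs-writeback.c is loaded above. The prototype swap presumably lives in the backing-dev header; a hedged sketch follows (the file name is an assumption; both signatures are taken verbatim from the hunks above):

/* include/linux/backing-dev.h -- assumed location, not the verbatim hunk */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-			 enum wb_reason reason);
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+			bool range_cyclic, enum wb_reason reason);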