writeback: reorganize mm/backing-dev.c
Move wb_shutdown(), bdi_register(), bdi_register_dev(), bdi_prune_sb(),
bdi_remove_from_list() and bdi_unregister() so that init / exit
functions are grouped together. This will make updating init / exit
paths for cgroup writeback support easier.

This is pure source file reorganization.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 4610007142
parent f0054bb1e1

1 changed file with 87 additions and 87 deletions
 mm/backing-dev.c | 174 ++++++++++++++++++++--------------------
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -285,93 +285,6 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
 	spin_unlock_bh(&wb->work_lock);
 }
 
-/*
- * Remove bdi from bdi_list, and ensure that it is no longer visible
- */
-static void bdi_remove_from_list(struct backing_dev_info *bdi)
-{
-	spin_lock_bh(&bdi_lock);
-	list_del_rcu(&bdi->bdi_list);
-	spin_unlock_bh(&bdi_lock);
-
-	synchronize_rcu_expedited();
-}
-
-int bdi_register(struct backing_dev_info *bdi, struct device *parent,
-		const char *fmt, ...)
-{
-	va_list args;
-	struct device *dev;
-
-	if (bdi->dev)	/* The driver needs to use separate queues per device */
-		return 0;
-
-	va_start(args, fmt);
-	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
-	va_end(args);
-	if (IS_ERR(dev))
-		return PTR_ERR(dev);
-
-	bdi->dev = dev;
-
-	bdi_debug_register(bdi, dev_name(dev));
-	set_bit(WB_registered, &bdi->wb.state);
-
-	spin_lock_bh(&bdi_lock);
-	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
-	spin_unlock_bh(&bdi_lock);
-
-	trace_writeback_bdi_register(bdi);
-	return 0;
-}
-EXPORT_SYMBOL(bdi_register);
-
-int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
-{
-	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
-}
-EXPORT_SYMBOL(bdi_register_dev);
-
-/*
- * Remove bdi from the global list and shutdown any threads we have running
- */
-static void wb_shutdown(struct bdi_writeback *wb)
-{
-	/* Make sure nobody queues further work */
-	spin_lock_bh(&wb->work_lock);
-	if (!test_and_clear_bit(WB_registered, &wb->state)) {
-		spin_unlock_bh(&wb->work_lock);
-		return;
-	}
-	spin_unlock_bh(&wb->work_lock);
-
-	/*
-	 * Drain work list and shutdown the delayed_work.  !WB_registered
-	 * tells wb_workfn() that @wb is dying and its work_list needs to
-	 * be drained no matter what.
-	 */
-	mod_delayed_work(bdi_wq, &wb->dwork, 0);
-	flush_delayed_work(&wb->dwork);
-	WARN_ON(!list_empty(&wb->work_list));
-}
-
-/*
- * Called when the device behind @bdi has been removed or ejected.
- *
- * We can't really do much here except for reducing the dirty ratio at
- * the moment.  In the future we should be able to set a flag so that
- * the filesystem can handle errors at mark_inode_dirty time instead
- * of only at writeback time.
- */
-void bdi_unregister(struct backing_dev_info *bdi)
-{
-	if (WARN_ON_ONCE(!bdi->dev))
-		return;
-
-	bdi_set_min_ratio(bdi, 0);
-}
-EXPORT_SYMBOL(bdi_unregister);
-
 /*
  * Initial write bandwidth: 100 MB/s
  */
@@ -418,6 +331,29 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 	return 0;
 }
 
+/*
+ * Remove bdi from the global list and shutdown any threads we have running
+ */
+static void wb_shutdown(struct bdi_writeback *wb)
+{
+	/* Make sure nobody queues further work */
+	spin_lock_bh(&wb->work_lock);
+	if (!test_and_clear_bit(WB_registered, &wb->state)) {
+		spin_unlock_bh(&wb->work_lock);
+		return;
+	}
+	spin_unlock_bh(&wb->work_lock);
+
+	/*
+	 * Drain work list and shutdown the delayed_work.  !WB_registered
+	 * tells wb_workfn() that @wb is dying and its work_list needs to
+	 * be drained no matter what.
+	 */
+	mod_delayed_work(bdi_wq, &wb->dwork, 0);
+	flush_delayed_work(&wb->dwork);
+	WARN_ON(!list_empty(&wb->work_list));
+}
+
 static void wb_exit(struct bdi_writeback *wb)
 {
 	int i;
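The wb_shutdown() helper moved in the hunk above is a compact example of a two-phase teardown: first clear the WB_registered bit under wb->work_lock so nobody can queue new work, then force the delayed work to run immediately and flush it so the work list drains before the structure goes away. Below is a minimal userspace sketch of the same ordering, using pthreads rather than the kernel workqueue API; every name in it is a hypothetical stand-in, not a kernel interface.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical analogue of bdi_writeback: a flag plus a pending count. */
struct writeback {
	pthread_mutex_t work_lock;
	pthread_cond_t drained;
	bool registered;	/* plays the role of WB_registered */
	int queued;		/* stand-in for wb->work_list */
};

/* Producer: queue work only while the flag is set. */
static bool wb_queue(struct writeback *wb)
{
	bool ok;

	pthread_mutex_lock(&wb->work_lock);
	ok = wb->registered;
	if (ok)
		wb->queued++;
	pthread_mutex_unlock(&wb->work_lock);
	return ok;
}

/* Worker: consume one item and signal when the queue is empty. */
static void wb_complete_one(struct writeback *wb)
{
	pthread_mutex_lock(&wb->work_lock);
	if (--wb->queued == 0)
		pthread_cond_signal(&wb->drained);
	pthread_mutex_unlock(&wb->work_lock);
}

/* Shutdown: flip the flag first, then wait for the queue to drain. */
static void wb_shutdown(struct writeback *wb)
{
	pthread_mutex_lock(&wb->work_lock);
	if (!wb->registered) {		/* already shut down */
		pthread_mutex_unlock(&wb->work_lock);
		return;
	}
	wb->registered = false;		/* no new work from here on */
	while (wb->queued > 0)		/* drain, like flush_delayed_work() */
		pthread_cond_wait(&wb->drained, &wb->work_lock);
	pthread_mutex_unlock(&wb->work_lock);
}

int main(void)
{
	struct writeback wb = {
		.work_lock = PTHREAD_MUTEX_INITIALIZER,
		.drained = PTHREAD_COND_INITIALIZER,
		.registered = true,
	};

	wb_queue(&wb);
	wb_complete_one(&wb);
	wb_shutdown(&wb);
	printf("queued after shutdown: %s\n", wb_queue(&wb) ? "yes" : "no");
	return 0;
}

The ordering is what matters: because the flag flips before the drain, a racing producer either queues ahead of the flush and gets drained, or sees the cleared flag and backs off. That is the same race wb_shutdown() closes with test_and_clear_bit() under wb->work_lock.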
@@ -449,6 +385,70 @@ int bdi_init(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_init);
 
+int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+		const char *fmt, ...)
+{
+	va_list args;
+	struct device *dev;
+
+	if (bdi->dev)	/* The driver needs to use separate queues per device */
+		return 0;
+
+	va_start(args, fmt);
+	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
+	va_end(args);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	bdi->dev = dev;
+
+	bdi_debug_register(bdi, dev_name(dev));
+	set_bit(WB_registered, &bdi->wb.state);
+
+	spin_lock_bh(&bdi_lock);
+	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
+
+	trace_writeback_bdi_register(bdi);
+	return 0;
+}
+EXPORT_SYMBOL(bdi_register);
+
+int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
+{
+	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
+}
+EXPORT_SYMBOL(bdi_register_dev);
+
+/*
+ * Remove bdi from bdi_list, and ensure that it is no longer visible
+ */
+static void bdi_remove_from_list(struct backing_dev_info *bdi)
+{
+	spin_lock_bh(&bdi_lock);
+	list_del_rcu(&bdi->bdi_list);
+	spin_unlock_bh(&bdi_lock);
+
+	synchronize_rcu_expedited();
+}
+
+/*
+ * Called when the device behind @bdi has been removed or ejected.
+ *
+ * We can't really do much here except for reducing the dirty ratio at
+ * the moment.  In the future we should be able to set a flag so that
+ * the filesystem can handle errors at mark_inode_dirty time instead
+ * of only at writeback time.
+ */
+void bdi_unregister(struct backing_dev_info *bdi)
+{
+	if (WARN_ON_ONCE(!bdi->dev))
+		return;
+
+	bdi_set_min_ratio(bdi, 0);
+}
+EXPORT_SYMBOL(bdi_unregister);
+
 void bdi_destroy(struct backing_dev_info *bdi)
 {
 	/* make sure nobody finds us on the bdi_list anymore */
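bdi_remove_from_list(), also moved in the hunk above, shows the standard RCU unpublish sequence: delete the node from the list under the writer-side lock (bdi_lock), then wait out a grace period (synchronize_rcu_expedited()) so lockless readers walking bdi_list under rcu_read_lock() can no longer hold a reference. A rough userspace equivalent is sketched below, assuming the userspace-rcu library (liburcu, link with -lurcu) is available; liburcu has no expedited variant, so plain synchronize_rcu() stands in, and every name apart from the liburcu calls is invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <urcu.h>		/* rcu_read_lock(), synchronize_rcu() */
#include <urcu/rculist.h>	/* cds_list_add_rcu(), cds_list_del_rcu() */

static CDS_LIST_HEAD(bdi_list);		/* global list, as in backing-dev.c */
static pthread_mutex_t bdi_lock = PTHREAD_MUTEX_INITIALIZER;

struct bdi {
	int id;
	struct cds_list_head node;
};

/* Publish: add under the writer lock, like bdi_register(). */
static void bdi_add(struct bdi *bdi)
{
	pthread_mutex_lock(&bdi_lock);
	cds_list_add_rcu(&bdi->node, &bdi_list);
	pthread_mutex_unlock(&bdi_lock);
}

/* Unpublish: delete under the lock, then wait for the grace period
 * to elapse before freeing, like bdi_remove_from_list(). */
static void bdi_remove(struct bdi *bdi)
{
	pthread_mutex_lock(&bdi_lock);
	cds_list_del_rcu(&bdi->node);
	pthread_mutex_unlock(&bdi_lock);

	synchronize_rcu();	/* no reader can still see the node */
	free(bdi);
}

int main(void)
{
	struct bdi *bdi = calloc(1, sizeof(*bdi));
	struct bdi *pos;

	rcu_register_thread();
	bdi->id = 1;
	bdi_add(bdi);

	rcu_read_lock();	/* lockless traversal, like bdi_list readers */
	cds_list_for_each_entry_rcu(pos, &bdi_list, node)
		printf("saw bdi %d\n", pos->id);
	rcu_read_unlock();

	bdi_remove(bdi);
	rcu_unregister_thread();
	return 0;
}

The kernel uses the expedited variant here because unregistration is rare and the caller wants it to finish quickly; paying extra system-wide disturbance for a much shorter grace-period wait is an acceptable trade on such a slow path.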