Mirror of https://github.com/Fishwaldo/linux-bl808.git (synced 2025-06-17 20:25:19 +00:00)
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging, and delay users have been converted to use the new API for that. So let's kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
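For context, a minimal sketch (not part of this commit) of how a submitter batches I/O with the explicit on-stack plugging API referred to above: blk_start_plug()/blk_finish_plug() come from the same series, while the submit_batch() helper, its bio array, and the WRITE direction are purely illustrative; submit_bio() is shown with its 2.6.39-era two-argument signature.

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	/*
	 * Illustrative only: queue a batch of bios behind an on-stack plug so
	 * the block layer can merge them before dispatch. With per-queue
	 * plugging gone, there is no plug to kick via blk_run_address_space()
	 * or aops->sync_page(); the plug lives on the submitter's stack.
	 */
	static void submit_batch(struct bio **bios, int nr)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);		/* attach the plug to the current task */
		for (i = 0; i < nr; i++)
			submit_bio(WRITE, bios[i]);	/* 2.6.39-era (rw, bio) form */
		blk_finish_plug(&plug);		/* flush the plug; I/O is dispatched here */
	}

If the task blocks while the plug is held, the plug is flushed automatically as it schedules out, which is why the sleep_on_buffer() path in the diff below can simply call io_schedule() instead of kicking the queue.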
parent 73c1010119
commit 7eaceaccab
119 changed files with 151 additions and 1269 deletions
 fs/buffer.c | 31 ++++---------------------------
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);
 
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
 {
-	struct block_device *bd;
-	struct buffer_head *bh
-		= container_of(word, struct buffer_head, b_state);
-
-	smp_mb();
-	bd = bh->b_bdev;
-	if (bd)
-		blk_run_address_space(bd->bd_inode->i_mapping);
 	io_schedule();
 	return 0;
 }
 
 void __lock_buffer(struct buffer_head *bh)
 {
-	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
 							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+	wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
@@ -749,7 +741,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping, *prev_mapping = NULL;
+	struct address_space *mapping;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -783,10 +775,6 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * wait_on_buffer() will do that for us
 				 * through sync_buffer().
 				 */
-				if (prev_mapping && prev_mapping != mapping)
-					blk_run_address_space(prev_mapping);
-				prev_mapping = mapping;
-
 				brelse(bh);
 				spin_lock(lock);
 			}
@@ -3138,17 +3126,6 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-void block_sync_page(struct page *page)
-{
-	struct address_space *mapping;
-
-	smp_mb();
-	mapping = page_mapping(page);
-	if (mapping)
-		blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
 /*
  * There are no bdflush tunables left.  But distributions are
  * still running obsolete flush daemons, so we terminate them here.